
Merge branch 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux into drm-next

This is the big pull request for amdgpu, the new driver for VI+ AMD
asics.  It currently supports Tonga, Iceland, and Carrizo and also
contains a Kconfig option to build support for CI parts for testing.

All major functionality is supported (displays, gfx, compute, dma,
video decode/encode, etc.).  Power management is working on Carrizo,
but is still being worked on for Tonga and Iceland.

* 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux: (106 commits)
  drm/amdgpu: only support IBs in the buffer list (v2)
  drm/amdgpu: add vram_type and vram_bit_width for interface query (v2)
  drm/amdgpu: add ib_size/start_alignment interface query
  drm/amdgpu: add me/ce/pfp_feature_version interface query
  drm/amdgpu add ce_ram_size for interface query
  drm/amdgpu add max_memory_clock for interface query (v2)
  drm/amdgpu: add hdp flush for gfx8 compute ring
  drm/amdgpu: fix no hdp flush for compute ring
  drm/amdgpu: add HEVC/H.265 UVD support
  drm/amdgpu: stop loading firmware with pm.mutex locked
  drm/amdgpu: remove mclk_lock
  drm/amdgpu: fix description of vm_size module parameter (v2)
  drm/amdgpu: remove all sh mem register modification in vm flush
  drm/amdgpu: rename GEM_OP_SET_INITIAL_DOMAIN -> GEM_OP_SET_PLACEMENT
  drm/amdgpu: fence should be added to shared slot
  drm/amdgpu: sync fence of clear_invalids (v2)
  drm/amdgpu: max_pde_used usage should be under protect
  drm/amdgpu: fix bug of vm_bo_map (v2)
  drm/amdgpu: implement the allocation range (v3)
  drm/amdgpu: rename amdgpu_ip_funcs to amd_ip_funcs (v2)
  ...
Dave Airlie, 10 years ago
Commit a93fe8f8b4
100 files changed, 61972 insertions(+), 0 deletions(-)
  1. drivers/gpu/drm/Kconfig (+21 -0)
  2. drivers/gpu/drm/Makefile (+1 -0)
  3. drivers/gpu/drm/amd/amdgpu/Kconfig (+17 -0)
  4. drivers/gpu/drm/amd/amdgpu/Makefile (+81 -0)
  5. drivers/gpu/drm/amd/amdgpu/ObjectID.h (+736 -0)
  6. drivers/gpu/drm/amd/amdgpu/amdgpu.h (+2332 -0)
  7. drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c (+768 -0)
  8. drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h (+445 -0)
  9. drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c (+105 -0)
  10. drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c (+1598 -0)
  11. drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h (+206 -0)
  12. drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c (+572 -0)
  13. drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c (+221 -0)
  14. drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c (+363 -0)
  15. drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c (+268 -0)
  16. drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c (+1907 -0)
  17. drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h (+42 -0)
  18. drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c (+784 -0)
  19. drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c (+193 -0)
  20. drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (+2003 -0)
  21. drivers/gpu/drm/amd/amdgpu/amdgpu_display.c (+832 -0)
  22. drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c (+955 -0)
  23. drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h (+85 -0)
  24. drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c (+545 -0)
  25. drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h (+48 -0)
  26. drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c (+245 -0)
  27. drivers/gpu/drm/amd/amdgpu/amdgpu_family.h (+62 -0)
  28. drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c (+421 -0)
  29. drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c (+1127 -0)
  30. drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c (+371 -0)
  31. drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h (+72 -0)
  32. drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c (+737 -0)
  33. drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c (+72 -0)
  34. drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h (+30 -0)
  35. drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c (+395 -0)
  36. drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h (+44 -0)
  37. drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c (+353 -0)
  38. drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c (+216 -0)
  39. drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h (+62 -0)
  40. drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c (+47 -0)
  41. drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c (+458 -0)
  42. drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h (+92 -0)
  43. drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c (+697 -0)
  44. drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c (+322 -0)
  45. drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h (+586 -0)
  46. drivers/gpu/drm/amd/amdgpu/amdgpu_object.c (+671 -0)
  47. drivers/gpu/drm/amd/amdgpu/amdgpu_object.h (+203 -0)
  48. drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c (+350 -0)
  49. drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h (+38 -0)
  50. drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c (+799 -0)
  51. drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h (+35 -0)
  52. drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c (+125 -0)
  53. drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c (+561 -0)
  54. drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c (+419 -0)
  55. drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c (+102 -0)
  56. drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c (+234 -0)
  57. drivers/gpu/drm/amd/amdgpu/amdgpu_test.c (+552 -0)
  58. drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h (+208 -0)
  59. drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c (+9 -0)
  60. drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c (+1215 -0)
  61. drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c (+317 -0)
  62. drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h (+176 -0)
  63. drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c (+984 -0)
  64. drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h (+39 -0)
  65. drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c (+724 -0)
  66. drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h (+47 -0)
  67. drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (+1265 -0)
  68. drivers/gpu/drm/amd/amdgpu/atom-bits.h (+48 -0)
  69. drivers/gpu/drm/amd/amdgpu/atom-names.h (+100 -0)
  70. drivers/gpu/drm/amd/amdgpu/atom-types.h (+42 -0)
  71. drivers/gpu/drm/amd/amdgpu/atom.c (+1408 -0)
  72. drivers/gpu/drm/amd/amdgpu/atom.h (+159 -0)
  73. drivers/gpu/drm/amd/amdgpu/atombios.h (+8555 -0)
  74. drivers/gpu/drm/amd/amdgpu/atombios_crtc.c (+807 -0)
  75. drivers/gpu/drm/amd/amdgpu/atombios_crtc.h (+58 -0)
  76. drivers/gpu/drm/amd/amdgpu/atombios_dp.c (+775 -0)
  77. drivers/gpu/drm/amd/amdgpu/atombios_dp.h (+42 -0)
  78. drivers/gpu/drm/amd/amdgpu/atombios_encoders.c (+2066 -0)
  79. drivers/gpu/drm/amd/amdgpu/atombios_encoders.h (+73 -0)
  80. drivers/gpu/drm/amd/amdgpu/atombios_i2c.c (+158 -0)
  81. drivers/gpu/drm/amd/amdgpu/atombios_i2c.h (+31 -0)
  82. drivers/gpu/drm/amd/amdgpu/ci_dpm.c (+6699 -0)
  83. drivers/gpu/drm/amd/amdgpu/ci_dpm.h (+348 -0)
  84. drivers/gpu/drm/amd/amdgpu/ci_smc.c (+279 -0)
  85. drivers/gpu/drm/amd/amdgpu/cik.c (+2513 -0)
  86. drivers/gpu/drm/amd/amdgpu/cik.h (+33 -0)
  87. drivers/gpu/drm/amd/amdgpu/cik_dpm.h (+30 -0)
  88. drivers/gpu/drm/amd/amdgpu/cik_ih.c (+471 -0)
  89. drivers/gpu/drm/amd/amdgpu/cik_ih.h (+29 -0)
  90. drivers/gpu/drm/amd/amdgpu/cik_sdma.c (+1405 -0)
  91. drivers/gpu/drm/amd/amdgpu/cik_sdma.h (+29 -0)
  92. drivers/gpu/drm/amd/amdgpu/cikd.h (+555 -0)
  93. drivers/gpu/drm/amd/amdgpu/clearstate_ci.h (+944 -0)
  94. drivers/gpu/drm/amd/amdgpu/clearstate_defs.h (+44 -0)
  95. drivers/gpu/drm/amd/amdgpu/clearstate_vi.h (+944 -0)
  96. drivers/gpu/drm/amd/amdgpu/cz_dpm.c (+1814 -0)
  97. drivers/gpu/drm/amd/amdgpu/cz_dpm.h (+237 -0)
  98. drivers/gpu/drm/amd/amdgpu/cz_ih.c (+452 -0)
  99. drivers/gpu/drm/amd/amdgpu/cz_ih.h (+29 -0)
  100. drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h (+185 -0)

+ 21 - 0
drivers/gpu/drm/Kconfig

@@ -120,6 +120,27 @@ config DRM_RADEON
 
 source "drivers/gpu/drm/radeon/Kconfig"
 
+config DRM_AMDGPU
+	tristate "AMD GPU"
+	depends on DRM && PCI
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select FW_LOADER
+        select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+        select DRM_TTM
+	select POWER_SUPPLY
+	select HWMON
+	select BACKLIGHT_CLASS_DEVICE
+	select INTERVAL_TREE
+	help
+	  Choose this option if you have a recent AMD Radeon graphics card.
+
+	  If M is selected, the module will be called amdgpu.
+
+source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+
 source "drivers/gpu/drm/nouveau/Kconfig"
 
 config DRM_I810

+ 1 - 0
drivers/gpu/drm/Makefile

@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_TDFX)	+= tdfx/
 obj-$(CONFIG_DRM_R128)	+= r128/
 obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
+obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_MGA)	+= mga/
 obj-$(CONFIG_DRM_I810)	+= i810/
 obj-$(CONFIG_DRM_I915)  += i915/

+ 17 - 0
drivers/gpu/drm/amd/amdgpu/Kconfig

@@ -0,0 +1,17 @@
+config DRM_AMDGPU_CIK
+	bool "Enable amdgpu support for CIK parts"
+	depends on DRM_AMDGPU
+	help
+	  Choose this option if you want to enable experimental support
+	  for CIK asics.
+
+	  CIK is already supported in radeon.  CIK support in amdgpu
+	  is for experimentation and testing.
+
+config DRM_AMDGPU_USERPTR
+	bool "Always enable userptr write support"
+	depends on DRM_AMDGPU
+	select MMU_NOTIFIER
+	help
+	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
+	  selected to enable full userptr support.

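The Kconfig entries in this series follow the usual kernel pattern: DRM_AMDGPU (added to drivers/gpu/drm/Kconfig above) is a tristate, so C code can test it with IS_ENABLED()/IS_MODULE() from <linux/kconfig.h>, while DRM_AMDGPU_CIK and DRM_AMDGPU_USERPTR are plain bools that gate objects and code at build time. A minimal sketch of how such symbols are typically consumed; the helper function below is hypothetical and not part of this pull request:

    /*
     * Illustrative only: consuming the Kconfig symbols defined above.
     * amdgpu_config_example() is a hypothetical helper, not code from
     * this series.
     */
    #include <linux/kconfig.h>
    #include <linux/printk.h>

    static void amdgpu_config_example(void)
    {
        /* Tristate symbol: IS_ENABLED() is true for both =y and =m. */
        if (IS_ENABLED(CONFIG_DRM_AMDGPU))
            pr_info("amdgpu is built in or built as the amdgpu.ko module\n");

    #ifdef CONFIG_DRM_AMDGPU_CIK
        /* Bool symbol: experimental CIK asic support is compiled in. */
        pr_info("experimental CIK support enabled\n");
    #endif

        /* DRM_AMDGPU_USERPTR selects MMU_NOTIFIER, which is what the
         * Makefile below keys amdgpu_mn.o on. */
        if (IS_ENABLED(CONFIG_MMU_NOTIFIER))
            pr_info("full userptr support is available\n");
    }
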
+ 81 - 0
drivers/gpu/drm/amd/amdgpu/Makefile

@@ -0,0 +1,81 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
+	-Idrivers/gpu/drm/amd/include
+
+amdgpu-y := amdgpu_drv.o
+
+# add KMS driver
+amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
+	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
+	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
+	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
+	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
+	amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+	atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
+	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+
+# add asic specific block
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+
+amdgpu-y += \
+	vi.o
+
+# add GMC block
+amdgpu-y += \
+	gmc_v8_0.o
+
+# add IH block
+amdgpu-y += \
+	amdgpu_irq.o \
+	amdgpu_ih.o \
+	iceland_ih.o \
+	tonga_ih.o \
+	cz_ih.o
+
+# add SMC block
+amdgpu-y += \
+	amdgpu_dpm.o \
+	cz_smc.o cz_dpm.o \
+	tonga_smc.o tonga_dpm.o \
+	iceland_smc.o iceland_dpm.o
+
+# add DCE block
+amdgpu-y += \
+	dce_v10_0.o \
+	dce_v11_0.o
+
+# add GFX block
+amdgpu-y += \
+	amdgpu_gfx.o \
+	gfx_v8_0.o
+
+# add async DMA block
+amdgpu-y += \
+	sdma_v2_4.o \
+	sdma_v3_0.o
+
+# add UVD block
+amdgpu-y += \
+	amdgpu_uvd.o \
+	uvd_v5_0.o \
+	uvd_v6_0.o
+
+# add VCE block
+amdgpu-y += \
+	amdgpu_vce.o \
+	vce_v3_0.o
+
+amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
+amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
+amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
+amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
+
+obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
+
+CFLAGS_amdgpu_trace_points.o := -I$(src)

+ 736 - 0
drivers/gpu/drm/amd/amdgpu/ObjectID.h

@@ -0,0 +1,736 @@
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.  
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
+/****************************************************/
+/* Graphics Object Type Definition                  */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE                    0x0
+#define GRAPH_OBJECT_TYPE_GPU                     0x1
+#define GRAPH_OBJECT_TYPE_ENCODER                 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
+#define GRAPH_OBJECT_TYPE_ROUTER                  0x4
+/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH            0x6  
+#define GRAPH_OBJECT_TYPE_GENERIC                 0x7
+
+/****************************************************/
+/* Encoder Object ID Definition                     */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE                    0x00 
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05     /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B                  0x08
+#define ENCODER_OBJECT_ID_CH7303                  0x09
+#define ENCODER_OBJECT_ID_CH7301                  0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B    /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
+#define ENCODER_OBJECT_ID_TITFP513                0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F    /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623                  0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+#define ENCODER_OBJECT_ID_ALMOND                  0x22
+#define ENCODER_OBJECT_ID_TRAVIS                  0x23
+#define ENCODER_OBJECT_ID_NUTMEG                  0x22
+#define ENCODER_OBJECT_ID_HDMI_ANX9805            0x26
+
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16  /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178                   0X17  /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18  /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
+#define ENCODER_OBJECT_ID_VT1625                  0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801               0x1C
+#define ENCODER_OBJECT_ID_DP_DP501                0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY         0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
+#define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3        0x25
+#define ENCODER_OBJECT_ID_INTERNAL_AMCLK          0x27
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
+
+/****************************************************/
+/* Connector Object ID Definition                   */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE                  0x00 
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D       0x04
+#define CONNECTOR_OBJECT_ID_VGA                   0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE             0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO                0x07
+#define CONNECTOR_OBJECT_ID_YPbPr                 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A  /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART                 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
+#define CONNECTOR_OBJECT_ID_LVDS                  0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN              0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR        0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+#define CONNECTOR_OBJECT_ID_eDP                   0x14
+#define CONNECTOR_OBJECT_ID_MXM                   0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition                      */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE											0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL				0x01
+
+/****************************************************/
+/* Generic Object ID Definition                     */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE                    0x00
+#define GENERIC_OBJECT_ID_GLSYNC                  0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
+#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN              0x04        //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition               */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1                     0x01
+#define GRAPH_OBJECT_ENUM_ID2                     0x02
+#define GRAPH_OBJECT_ENUM_ID3                     0x03
+#define GRAPH_OBJECT_ENUM_ID4                     0x04
+#define GRAPH_OBJECT_ENUM_ID5                     0x05
+#define GRAPH_OBJECT_ENUM_ID6                     0x06
+#define GRAPH_OBJECT_ENUM_ID7                     0x07
+
+/****************************************************/
+/* Graphics Object ID Bit definition                */
+/****************************************************/
+#define OBJECT_ID_MASK                            0x00FF
+#define ENUM_ID_MASK                              0x0700
+#define RESERVED1_ID_MASK                         0x0800
+#define OBJECT_TYPE_MASK                          0x7000
+#define RESERVED2_ID_MASK                         0x8000
+                                                  
+#define OBJECT_ID_SHIFT                           0x00
+#define ENUM_ID_SHIFT                             0x08
+#define OBJECT_TYPE_SHIFT                         0x0C
+
+
+/****************************************************/
+/* Graphics Object family definition                */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+                                                                           GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS      */
+/****************************************************/
+#define GPU_ENUM_ID1                            ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS  */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101      
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
+#define ENCODER_SIL170B_ENUM_ID1              0x2108  
+#define ENCODER_CH7303_ENUM_ID1               0x2109
+#define ENCODER_CH7301_ENUM_ID1               0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1       0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1       0x210D
+#define ENCODER_TITFP513_ENUM_ID1             0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1       0x210F
+#define ENCODER_VT1623_ENUM_ID1               0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1          0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1        0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116  
+#define ENCODER_SI178_ENUM_ID1                   0x2117 
+#define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
+#define ENCODER_VT1625_ENUM_ID1                  0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1             0x211B
+#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1       0x211C
+#define ENCODER_DP_DP501_ENUM_ID1                0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1           ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1          ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT)  // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1                    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)  
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1                ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1     (  GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 
+
+#define ENCODER_VT1625_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)  
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
+#define ENCODER_VCE_ENUM_ID1                     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_ANX9805_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1        0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1          0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1        0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1          0x3104
+#define CONNECTOR_VGA_ENUM_ID1                      0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1                0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1                   0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1                    0x3108
+#define CONNECTOR_D_CONNECTORE_ENUM_ID1             0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1                 0x310A
+#define CONNECTOR_SCART_ENUM_ID1                    0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1              0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1              0x310D
+#define CONNECTOR_LVDS_ENUM_ID1                     0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID5   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID6   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID4     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS   */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1      ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Generic Object ID definition - Shared with BIOS  */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1        (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS         */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
+
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif  /*GRAPHICTYPE */
+
+
+
+
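Editor's note on the file above: every connector, router and generic object ID in ObjectID.h is built the same way, by shifting an object type, an enum index and an object id into a single value. A minimal sketch of the reverse operation follows; it is not part of the header, and the 3-bit/8-bit field widths are assumptions for illustration only (the *_SHIFT macros are the ones defined earlier in the file).

/* Illustrative only: unpack a composed graphics object ID back into its
 * type/enum/id fields.  The field widths (0x7, 0xff) are assumed for this
 * sketch; the *_SHIFT macros are the ones used by the defines above. */
static inline void object_id_unpack(unsigned int object_id,
                                    unsigned int *type,
                                    unsigned int *enum_id,
                                    unsigned int *id)
{
        *type    = (object_id >> OBJECT_TYPE_SHIFT) & 0x7;
        *enum_id = (object_id >> ENUM_ID_SHIFT) & 0x7;
        *id      = (object_id >> OBJECT_ID_SHIFT) & 0xff;
}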

+ 2332 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -0,0 +1,2332 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __AMDGPU_H__
+#define __AMDGPU_H__
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/interval_tree.h>
+#include <linux/hashtable.h>
+#include <linux/fence.h>
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
+
+#include <drm/drm_gem.h>
+#include <drm/amdgpu_drm.h>
+
+#include "amd_shared.h"
+#include "amdgpu_family.h"
+#include "amdgpu_mode.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_irq.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_gds.h"
+
+/*
+ * Module parameters.
+ */
+extern int amdgpu_modeset;
+extern int amdgpu_vram_limit;
+extern int amdgpu_gart_size;
+extern int amdgpu_benchmarking;
+extern int amdgpu_testing;
+extern int amdgpu_audio;
+extern int amdgpu_disp_priority;
+extern int amdgpu_hw_i2c;
+extern int amdgpu_pcie_gen2;
+extern int amdgpu_msi;
+extern int amdgpu_lockup_timeout;
+extern int amdgpu_dpm;
+extern int amdgpu_smc_load_fw;
+extern int amdgpu_aspm;
+extern int amdgpu_runtime_pm;
+extern int amdgpu_hard_reset;
+extern unsigned amdgpu_ip_block_mask;
+extern int amdgpu_bapm;
+extern int amdgpu_deep_color;
+extern int amdgpu_vm_size;
+extern int amdgpu_vm_block_size;
+
+#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
+#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
+/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
+#define AMDGPU_IB_POOL_SIZE			16
+#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
+#define AMDGPUFB_CONN_LIMIT			4
+#define AMDGPU_BIOS_NUM_SCRATCH			8
+
+/* max number of rings */
+#define AMDGPU_MAX_RINGS			16
+#define AMDGPU_MAX_GFX_RINGS			1
+#define AMDGPU_MAX_COMPUTE_RINGS		8
+#define AMDGPU_MAX_VCE_RINGS			2
+
+/* number of hw syncs before falling back on blocking */
+#define AMDGPU_NUM_SYNCS			4
+
+/* hardcode that limit for now */
+#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)
+
+/* hard reset data */
+#define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b
+
+/* reset flags */
+#define AMDGPU_RESET_GFX			(1 << 0)
+#define AMDGPU_RESET_COMPUTE			(1 << 1)
+#define AMDGPU_RESET_DMA			(1 << 2)
+#define AMDGPU_RESET_CP				(1 << 3)
+#define AMDGPU_RESET_GRBM			(1 << 4)
+#define AMDGPU_RESET_DMA1			(1 << 5)
+#define AMDGPU_RESET_RLC			(1 << 6)
+#define AMDGPU_RESET_SEM			(1 << 7)
+#define AMDGPU_RESET_IH				(1 << 8)
+#define AMDGPU_RESET_VMC			(1 << 9)
+#define AMDGPU_RESET_MC				(1 << 10)
+#define AMDGPU_RESET_DISPLAY			(1 << 11)
+#define AMDGPU_RESET_UVD			(1 << 12)
+#define AMDGPU_RESET_VCE			(1 << 13)
+#define AMDGPU_RESET_VCE1			(1 << 14)
+
+/* CG block flags */
+#define AMDGPU_CG_BLOCK_GFX			(1 << 0)
+#define AMDGPU_CG_BLOCK_MC			(1 << 1)
+#define AMDGPU_CG_BLOCK_SDMA			(1 << 2)
+#define AMDGPU_CG_BLOCK_UVD			(1 << 3)
+#define AMDGPU_CG_BLOCK_VCE			(1 << 4)
+#define AMDGPU_CG_BLOCK_HDP			(1 << 5)
+#define AMDGPU_CG_BLOCK_BIF			(1 << 6)
+
+/* CG flags */
+#define AMDGPU_CG_SUPPORT_GFX_MGCG		(1 << 0)
+#define AMDGPU_CG_SUPPORT_GFX_MGLS		(1 << 1)
+#define AMDGPU_CG_SUPPORT_GFX_CGCG		(1 << 2)
+#define AMDGPU_CG_SUPPORT_GFX_CGLS		(1 << 3)
+#define AMDGPU_CG_SUPPORT_GFX_CGTS		(1 << 4)
+#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
+#define AMDGPU_CG_SUPPORT_GFX_CP_LS		(1 << 6)
+#define AMDGPU_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
+#define AMDGPU_CG_SUPPORT_MC_LS			(1 << 8)
+#define AMDGPU_CG_SUPPORT_MC_MGCG		(1 << 9)
+#define AMDGPU_CG_SUPPORT_SDMA_LS		(1 << 10)
+#define AMDGPU_CG_SUPPORT_SDMA_MGCG		(1 << 11)
+#define AMDGPU_CG_SUPPORT_BIF_LS		(1 << 12)
+#define AMDGPU_CG_SUPPORT_UVD_MGCG		(1 << 13)
+#define AMDGPU_CG_SUPPORT_VCE_MGCG		(1 << 14)
+#define AMDGPU_CG_SUPPORT_HDP_LS		(1 << 15)
+#define AMDGPU_CG_SUPPORT_HDP_MGCG		(1 << 16)
+
+/* PG flags */
+#define AMDGPU_PG_SUPPORT_GFX_PG		(1 << 0)
+#define AMDGPU_PG_SUPPORT_GFX_SMG		(1 << 1)
+#define AMDGPU_PG_SUPPORT_GFX_DMG		(1 << 2)
+#define AMDGPU_PG_SUPPORT_UVD			(1 << 3)
+#define AMDGPU_PG_SUPPORT_VCE			(1 << 4)
+#define AMDGPU_PG_SUPPORT_CP			(1 << 5)
+#define AMDGPU_PG_SUPPORT_GDS			(1 << 6)
+#define AMDGPU_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
+#define AMDGPU_PG_SUPPORT_SDMA			(1 << 8)
+#define AMDGPU_PG_SUPPORT_ACP			(1 << 9)
+#define AMDGPU_PG_SUPPORT_SAMU			(1 << 10)
+
+/* GFX current status */
+#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
+#define AMDGPU_GFX_SAFE_MODE			0x00000001L
+#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
+#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
+#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L
+
+/* max cursor sizes (in pixels) */
+#define CIK_CURSOR_WIDTH 128
+#define CIK_CURSOR_HEIGHT 128
+
+struct amdgpu_device;
+struct amdgpu_fence;
+struct amdgpu_ib;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_semaphore;
+struct amdgpu_cs_parser;
+struct amdgpu_irq_src;
+
+enum amdgpu_cp_irq {
+	AMDGPU_CP_IRQ_GFX_EOP = 0,
+	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
+	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
+
+	AMDGPU_CP_IRQ_LAST
+};
+
+enum amdgpu_sdma_irq {
+	AMDGPU_SDMA_IRQ_TRAP0 = 0,
+	AMDGPU_SDMA_IRQ_TRAP1,
+
+	AMDGPU_SDMA_IRQ_LAST
+};
+
+enum amdgpu_thermal_irq {
+	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
+	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
+
+	AMDGPU_THERMAL_IRQ_LAST
+};
+
+int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
+				  enum amd_ip_block_type block_type,
+				  enum amd_clockgating_state state);
+int amdgpu_set_powergating_state(struct amdgpu_device *adev,
+				  enum amd_ip_block_type block_type,
+				  enum amd_powergating_state state);
+
+struct amdgpu_ip_block_version {
+	enum amd_ip_block_type type;
+	u32 major;
+	u32 minor;
+	u32 rev;
+	const struct amd_ip_funcs *funcs;
+};
+
+int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
+				enum amd_ip_block_type type,
+				u32 major, u32 minor);
+
+const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
+					struct amdgpu_device *adev,
+					enum amd_ip_block_type type);
+
+/* provided by hw blocks that can move/clear data.  e.g., gfx or sdma */
+struct amdgpu_buffer_funcs {
+	/* maximum bytes in a single operation */
+	uint32_t	copy_max_bytes;
+
+	/* number of dw to reserve per operation */
+	unsigned	copy_num_dw;
+
+	/* used for buffer migration */
+	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+				 /* src addr in bytes */
+				 uint64_t src_offset,
+				 /* dst addr in bytes */
+				 uint64_t dst_offset,
+				 /* number of bytes to transfer */
+				 uint32_t byte_count);
+
+	/* maximum bytes in a single operation */
+	uint32_t	fill_max_bytes;
+
+	/* number of dw to reserve per operation */
+	unsigned	fill_num_dw;
+
+	/* used for buffer clearing */
+	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+				 /* value to write to memory */
+				 uint32_t src_data,
+				 /* dst addr in bytes */
+				 uint64_t dst_offset,
+				 /* number of bytes to fill */
+				 uint32_t byte_count);
+};
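Editor's sketch: amdgpu_buffer_funcs is the hook a copy-capable block fills in so the rest of the driver can migrate buffers without caring which engine does the work. A hypothetical registration could look like the snippet below; every example_* name and numeric limit is invented for illustration, not taken from the driver.

/* Hypothetical example, not driver code: an engine advertising its copy
 * callback.  A real implementation would emit copy packets on the ring. */
static void example_emit_copy(struct amdgpu_ring *ring,
                              uint64_t src_offset, uint64_t dst_offset,
                              uint32_t byte_count)
{
        /* emit the engine-specific copy command here */
}

static const struct amdgpu_buffer_funcs example_buffer_funcs = {
        .copy_max_bytes   = 0x1fffff,          /* assumed hardware limit */
        .copy_num_dw      = 7,                 /* assumed packet size in dwords */
        .emit_copy_buffer = example_emit_copy,
        /* fill_* members would be set the same way for a clear-capable block */
};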
+
+/* provided by hw blocks that can write ptes, e.g., sdma */
+struct amdgpu_vm_pte_funcs {
+	/* copy pte entries from GART */
+	void (*copy_pte)(struct amdgpu_ib *ib,
+			 uint64_t pe, uint64_t src,
+			 unsigned count);
+	/* write pte one entry at a time with addr mapping */
+	void (*write_pte)(struct amdgpu_ib *ib,
+			  uint64_t pe,
+			  uint64_t addr, unsigned count,
+			  uint32_t incr, uint32_t flags);
+	/* for linear pte/pde updates without addr mapping */
+	void (*set_pte_pde)(struct amdgpu_ib *ib,
+			    uint64_t pe,
+			    uint64_t addr, unsigned count,
+			    uint32_t incr, uint32_t flags);
+	/* pad the indirect buffer to the necessary number of dw */
+	void (*pad_ib)(struct amdgpu_ib *ib);
+};
+
+/* provided by the gmc block */
+struct amdgpu_gart_funcs {
+	/* flush the vm tlb via mmio */
+	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
+			      uint32_t vmid);
+	/* write pte/pde updates using the cpu */
+	int (*set_pte_pde)(struct amdgpu_device *adev,
+			   void *cpu_pt_addr, /* cpu addr of page table */
+			   uint32_t gpu_page_idx, /* pte/pde to update */
+			   uint64_t addr, /* addr to write into pte/pde */
+			   uint32_t flags); /* access flags */
+};
+
+/* provided by the ih block */
+struct amdgpu_ih_funcs {
+	/* ring read/write ptr handling, called from interrupt context */
+	u32 (*get_wptr)(struct amdgpu_device *adev);
+	void (*decode_iv)(struct amdgpu_device *adev,
+			  struct amdgpu_iv_entry *entry);
+	void (*set_rptr)(struct amdgpu_device *adev);
+};
+
+/* provided by hw blocks that expose a ring buffer for commands */
+struct amdgpu_ring_funcs {
+	/* ring read/write ptr handling */
+	u32 (*get_rptr)(struct amdgpu_ring *ring);
+	u32 (*get_wptr)(struct amdgpu_ring *ring);
+	void (*set_wptr)(struct amdgpu_ring *ring);
+	/* validating and patching of IBs */
+	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	/* command emit functions */
+	void (*emit_ib)(struct amdgpu_ring *ring,
+			struct amdgpu_ib *ib);
+	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
+			   uint64_t seq, bool write64bit);
+	bool (*emit_semaphore)(struct amdgpu_ring *ring,
+			       struct amdgpu_semaphore *semaphore,
+			       bool emit_wait);
+	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+			      uint64_t pd_addr);
+	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
+				uint32_t gds_base, uint32_t gds_size,
+				uint32_t gws_base, uint32_t gws_size,
+				uint32_t oa_base, uint32_t oa_size);
+	/* testing functions */
+	int (*test_ring)(struct amdgpu_ring *ring);
+	int (*test_ib)(struct amdgpu_ring *ring);
+	bool (*is_lockup)(struct amdgpu_ring *ring);
+};
+
+/*
+ * BIOS.
+ */
+bool amdgpu_get_bios(struct amdgpu_device *adev);
+bool amdgpu_read_bios(struct amdgpu_device *adev);
+
+/*
+ * Dummy page
+ */
+struct amdgpu_dummy_page {
+	struct page	*page;
+	dma_addr_t	addr;
+};
+int amdgpu_dummy_page_init(struct amdgpu_device *adev);
+void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
+
+
+/*
+ * Clocks
+ */
+
+#define AMDGPU_MAX_PPLL 3
+
+struct amdgpu_clock {
+	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
+	struct amdgpu_pll spll;
+	struct amdgpu_pll mpll;
+	/* 10 kHz units */
+	uint32_t default_mclk;
+	uint32_t default_sclk;
+	uint32_t default_dispclk;
+	uint32_t current_dispclk;
+	uint32_t dp_extclk;
+	uint32_t max_pixel_clock;
+};
+
+/*
+ * Fences.
+ */
+struct amdgpu_fence_driver {
+	struct amdgpu_ring		*ring;
+	uint64_t			gpu_addr;
+	volatile uint32_t		*cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
+	atomic64_t			last_seq;
+	bool				initialized;
+	bool				delayed_irq;
+	struct amdgpu_irq_src		*irq_src;
+	unsigned			irq_type;
+	struct delayed_work             lockup_work;
+};
+
+/* some special values for the owner field */
+#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
+#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
+#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)
+
+struct amdgpu_fence {
+	struct fence base;
+
+	/* RB, DMA, etc. */
+	struct amdgpu_ring		*ring;
+	uint64_t			seq;
+
+	/* filp or special value for fence creator */
+	void				*owner;
+
+	wait_queue_t			fence_wake;
+};
+
+struct amdgpu_user_fence {
+	/* write-back bo */
+	struct amdgpu_bo 	*bo;
+	/* write-back address offset to bo start */
+	uint32_t                offset;
+};
+
+int amdgpu_fence_driver_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+
+void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+				   struct amdgpu_irq_src *irq_src,
+				   unsigned irq_type);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
+		      struct amdgpu_fence **fence);
+void amdgpu_fence_process(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+
+bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
+int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
+int amdgpu_fence_wait_any(struct amdgpu_device *adev,
+			  struct amdgpu_fence **fences,
+			  bool intr);
+long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+				   u64 *target_seq, bool intr,
+				   long timeout);
+struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
+void amdgpu_fence_unref(struct amdgpu_fence **fence);
+
+bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
+			    struct amdgpu_ring *ring);
+void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
+			    struct amdgpu_ring *ring);
+
+static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
+						      struct amdgpu_fence *b)
+{
+	if (!a) {
+		return b;
+	}
+
+	if (!b) {
+		return a;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	if (a->seq > b->seq) {
+		return a;
+	} else {
+		return b;
+	}
+}
+
+static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
+					   struct amdgpu_fence *b)
+{
+	if (!a) {
+		return false;
+	}
+
+	if (!b) {
+		return true;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	return a->seq < b->seq;
+}
+
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
+			   void *owner, struct amdgpu_fence **fence);
+
+/*
+ * TTM.
+ */
+struct amdgpu_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	struct ttm_bo_device		bdev;
+	bool				mem_global_referenced;
+	bool				initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*vram;
+	struct dentry			*gtt;
+#endif
+
+	/* buffer handling */
+	const struct amdgpu_buffer_funcs	*buffer_funcs;
+	struct amdgpu_ring			*buffer_funcs_ring;
+};
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+		       uint64_t src_offset,
+		       uint64_t dst_offset,
+		       uint32_t byte_count,
+		       struct reservation_object *resv,
+		       struct amdgpu_fence **fence);
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+
+struct amdgpu_bo_list_entry {
+	struct amdgpu_bo		*robj;
+	struct ttm_validate_buffer	tv;
+	struct amdgpu_bo_va		*bo_va;
+	unsigned			prefered_domains;
+	unsigned			allowed_domains;
+	uint32_t			priority;
+};
+
+struct amdgpu_bo_va_mapping {
+	struct list_head		list;
+	struct interval_tree_node	it;
+	uint64_t			offset;
+	uint32_t			flags;
+};
+
+/* bo virtual addresses in a specific vm */
+struct amdgpu_bo_va {
+	/* protected by bo being reserved */
+	struct list_head		bo_list;
+	uint64_t			addr;
+	struct amdgpu_fence		*last_pt_update;
+	unsigned			ref_count;
+
+	/* protected by vm mutex */
+	struct list_head		mappings;
+	struct list_head		vm_status;
+
+	/* constant after initialization */
+	struct amdgpu_vm		*vm;
+	struct amdgpu_bo		*bo;
+};
+
+#define AMDGPU_GEM_DOMAIN_MAX		0x3
+
+struct amdgpu_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				initial_domain;
+	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	u64				flags;
+	unsigned			pin_count;
+	void				*kptr;
+	u64				tiling_flags;
+	u64				metadata_flags;
+	void				*metadata;
+	u32				metadata_size;
+	/* list of all virtual addresses to which this bo
+	 * is associated
+	 */
+	struct list_head		va;
+	/* Constant after initialization */
+	struct amdgpu_device		*adev;
+	struct drm_gem_object		gem_base;
+
+	struct ttm_bo_kmap_obj		dma_buf_vmap;
+	pid_t				pid;
+	struct amdgpu_mn		*mn;
+	struct list_head		mn_list;
+};
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+
+void amdgpu_gem_object_free(struct drm_gem_object *obj);
+int amdgpu_gem_object_open(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+void amdgpu_gem_object_close(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
+struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+							struct dma_buf_attachment *attach,
+							struct sg_table *sg);
+struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *gobj,
+					int flags);
+int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
+void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
+void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
+void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
+
+/* Sub-allocation manager. It has to be protected by another lock;
+ * by design it is a helper for other parts of the driver, such as
+ * the indirect buffer or semaphore code, which each have their own
+ * locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in
+ * offset order (the first entry has offset == 0, the last entry has
+ * the highest offset).
+ *
+ * When allocating a new object we first check whether there is room
+ * at the end, i.e. total_size - (last_object_offset + last_object_size)
+ * >= alloc_size. If so, we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub-object until we reach object_offset + object_size >=
+ * alloc_size; that object then becomes the sub-object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation to keep things simple.
+ * The assumption is that there won't be holes (all objects use the
+ * same alignment).
+ */
+struct amdgpu_sa_manager {
+	wait_queue_head_t	wq;
+	struct amdgpu_bo	*bo;
+	struct list_head	*hole;
+	struct list_head	flist[AMDGPU_MAX_RINGS];
+	struct list_head	olist;
+	unsigned		size;
+	uint64_t		gpu_addr;
+	void			*cpu_ptr;
+	uint32_t		domain;
+	uint32_t		align;
+};
+
+struct amdgpu_sa_bo;
+
+/* sub-allocation buffer */
+struct amdgpu_sa_bo {
+	struct list_head		olist;
+	struct list_head		flist;
+	struct amdgpu_sa_manager	*manager;
+	unsigned			soffset;
+	unsigned			eoffset;
+	struct amdgpu_fence		*fence;
+};
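Editor's sketch of the allocation rule described in the comment above the sub-allocation manager: a new object goes at the end of the buffer when it fits, otherwise the allocator waits for earlier sub-objects to retire. The helper below only restates the "room at the end" test; it is illustrative and not part of the driver.

/* Illustrative only: the "room at the end" check from the comment above,
 * rearranged to avoid unsigned underflow when the buffer is already full. */
static inline bool sa_fits_at_end(unsigned total_size,
                                  unsigned last_object_offset,
                                  unsigned last_object_size,
                                  unsigned alloc_size)
{
        return last_object_offset + last_object_size + alloc_size <= total_size;
}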
+
+/*
+ * GEM objects.
+ */
+struct amdgpu_gem {
+	struct mutex		mutex;
+	struct list_head	objects;
+};
+
+int amdgpu_gem_init(struct amdgpu_device *adev);
+void amdgpu_gem_fini(struct amdgpu_device *adev);
+int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+				int alignment, u32 initial_domain,
+				u64 flags, bool kernel,
+				struct drm_gem_object **obj);
+
+int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int amdgpu_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+
+/*
+ * Semaphores.
+ */
+struct amdgpu_semaphore {
+	struct amdgpu_sa_bo	*sa_bo;
+	signed			waiters;
+	uint64_t		gpu_addr;
+};
+
+int amdgpu_semaphore_create(struct amdgpu_device *adev,
+			    struct amdgpu_semaphore **semaphore);
+bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
+				  struct amdgpu_semaphore *semaphore);
+bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
+				struct amdgpu_semaphore *semaphore);
+void amdgpu_semaphore_free(struct amdgpu_device *adev,
+			   struct amdgpu_semaphore **semaphore,
+			   struct amdgpu_fence *fence);
+
+/*
+ * Synchronization
+ */
+struct amdgpu_sync {
+	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
+	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
+	struct amdgpu_fence	*last_vm_update;
+};
+
+void amdgpu_sync_create(struct amdgpu_sync *sync);
+void amdgpu_sync_fence(struct amdgpu_sync *sync,
+		       struct amdgpu_fence *fence);
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+		     struct amdgpu_sync *sync,
+		     struct reservation_object *resv,
+		     void *owner);
+int amdgpu_sync_rings(struct amdgpu_sync *sync,
+		      struct amdgpu_ring *ring);
+void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct amdgpu_fence *fence);
+
+/*
+ * GART structures, functions & helpers
+ */
+struct amdgpu_mc;
+
+#define AMDGPU_GPU_PAGE_SIZE 4096
+#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
+#define AMDGPU_GPU_PAGE_SHIFT 12
+#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
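Editor's note, a quick worked example of the page macros above (not part of the header):

/*   AMDGPU_GPU_PAGE_ALIGN(4097) == (4097 + 4095) & ~4095 == 8192
 *   AMDGPU_GPU_PAGE_ALIGN(4096) == 4096
 * i.e. a 4097-byte request rounds up to two 4 KB GPU pages, while an
 * exact page-sized request is left untouched. */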
+
+struct amdgpu_gart {
+	dma_addr_t			table_addr;
+	struct amdgpu_bo		*robj;
+	void				*ptr;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+	struct page			**pages;
+	dma_addr_t			*pages_addr;
+	bool				ready;
+	const struct amdgpu_gart_funcs *gart_funcs;
+};
+
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_gart_init(struct amdgpu_device *adev);
+void amdgpu_gart_fini(struct amdgpu_device *adev);
+void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+			int pages);
+int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr, uint32_t flags);
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct amdgpu_mc {
+	resource_size_t		aper_size;
+	resource_size_t		aper_base;
+	resource_size_t		agp_base;
+	/* for some chips with <= 32MB we need to lie
+	 * about vram size near mc fb location */
+	u64			mc_vram_size;
+	u64			visible_vram_size;
+	u64			gtt_size;
+	u64			gtt_start;
+	u64			gtt_end;
+	u64			vram_start;
+	u64			vram_end;
+	unsigned		vram_width;
+	u64			real_vram_size;
+	int			vram_mtrr;
+	u64                     gtt_base_align;
+	u64                     mc_mask;
+	const struct firmware   *fw;	/* MC firmware */
+	uint32_t                fw_version;
+	struct amdgpu_irq_src	vm_fault;
+	uint32_t		vram_type;
+};
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+	AMDGPU_DOORBELL_KIQ                     = 0x000,
+	AMDGPU_DOORBELL_HIQ                     = 0x001,
+	AMDGPU_DOORBELL_DIQ                     = 0x002,
+	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
+	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
+	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
+	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
+	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
+	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
+	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
+	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
+	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
+	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
+	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
+	AMDGPU_DOORBELL_IH                      = 0x1E8,
+	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
+	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+struct amdgpu_doorbell {
+	/* doorbell mmio */
+	resource_size_t		base;
+	resource_size_t		size;
+	u32 __iomem		*ptr;
+	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
+};
+
+void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+				phys_addr_t *aperture_base,
+				size_t *aperture_size,
+				size_t *start_offset);
+
+/*
+ * IRQS.
+ */
+
+struct amdgpu_flip_work {
+	struct work_struct		flip_work;
+	struct work_struct		unpin_work;
+	struct amdgpu_device		*adev;
+	int				crtc_id;
+	uint64_t			base;
+	struct drm_pending_vblank_event *event;
+	struct amdgpu_bo		*old_rbo;
+	struct fence			*fence;
+};
+
+
+/*
+ * CP & rings.
+ */
+
+struct amdgpu_ib {
+	struct amdgpu_sa_bo		*sa_bo;
+	uint32_t			length_dw;
+	uint64_t			gpu_addr;
+	uint32_t			*ptr;
+	struct amdgpu_ring		*ring;
+	struct amdgpu_fence		*fence;
+	struct amdgpu_user_fence        *user;
+	struct amdgpu_vm		*vm;
+	struct amdgpu_ctx		*ctx;
+	struct amdgpu_sync		sync;
+	uint32_t			gds_base, gds_size;
+	uint32_t			gws_base, gws_size;
+	uint32_t			oa_base, oa_size;
+	uint32_t			flags;
+};
+
+enum amdgpu_ring_type {
+	AMDGPU_RING_TYPE_GFX,
+	AMDGPU_RING_TYPE_COMPUTE,
+	AMDGPU_RING_TYPE_SDMA,
+	AMDGPU_RING_TYPE_UVD,
+	AMDGPU_RING_TYPE_VCE
+};
+
+struct amdgpu_ring {
+	struct amdgpu_device		*adev;
+	const struct amdgpu_ring_funcs	*funcs;
+	struct amdgpu_fence_driver	fence_drv;
+
+	struct mutex		*ring_lock;
+	struct amdgpu_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr_offs;
+	u64			next_rptr_gpu_addr;
+	volatile u32		*next_rptr_cpu_addr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	unsigned		ring_free_dw;
+	int			count_dw;
+	atomic_t		last_rptr;
+	atomic64_t		last_activity;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	bool			ready;
+	u32			nop;
+	u32			idx;
+	u64			last_semaphore_signal_addr;
+	u64			last_semaphore_wait_addr;
+	u32			me;
+	u32			pipe;
+	u32			queue;
+	struct amdgpu_bo	*mqd_obj;
+	u32			doorbell_index;
+	bool			use_doorbell;
+	unsigned		wptr_offs;
+	unsigned		next_rptr_offs;
+	unsigned		fence_offs;
+	struct amdgpu_ctx	*current_ctx;
+	enum amdgpu_ring_type	type;
+	char			name[16];
+};
+
+/*
+ * VM
+ */
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VM	16
+
+/* number of entries in page table */
+#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+
+/* PTBs (Page Table Blocks) need to be aligned to 32K */
+#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
+#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
+#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
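Editor's note, a worked example of the page-table sizing above; the block size of 9 is only an assumed value for the amdgpu_vm_block_size module parameter, not a default stated in this header:

/*   AMDGPU_VM_PTE_COUNT = 1 << 9 = 512 entries per page table
 *   512 entries * AMDGPU_GPU_PAGE_SIZE (4 KB) = 2 MB of address space
 * covered by each page table, with page-table allocations aligned to the
 * 32 KB AMDGPU_VM_PTB_ALIGN_SIZE boundary defined above. */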
+
+#define AMDGPU_PTE_VALID	(1 << 0)
+#define AMDGPU_PTE_SYSTEM	(1 << 1)
+#define AMDGPU_PTE_SNOOPED	(1 << 2)
+
+/* VI only */
+#define AMDGPU_PTE_EXECUTABLE	(1 << 4)
+
+#define AMDGPU_PTE_READABLE	(1 << 5)
+#define AMDGPU_PTE_WRITEABLE	(1 << 6)
+
+/* PTE (Page Table Entry) fragment field for different page sizes */
+#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
+#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
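Editor's sketch: the PTE bits above are meant to be OR'd together when a mapping is written. An illustrative combination for a snooped system-memory mapping (not a flag set quoted from the driver) could be:

/* Illustrative only */
uint32_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
                 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;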
+
+struct amdgpu_vm_pt {
+	struct amdgpu_bo		*bo;
+	uint64_t			addr;
+};
+
+struct amdgpu_vm_id {
+	unsigned		id;
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct amdgpu_fence	*flushed_updates;
+	/* last use of vmid */
+	struct amdgpu_fence	*last_id_use;
+};
+
+struct amdgpu_vm {
+	struct mutex		mutex;
+
+	struct rb_root		va;
+
+	/* protecting invalidated and freed */
+	spinlock_t		status_lock;
+
+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	invalidated;
+
+	/* BOs freed, but not yet updated in the PT */
+	struct list_head	freed;
+
+	/* contains the page directory */
+	struct amdgpu_bo	*page_directory;
+	unsigned		max_pde_used;
+
+	/* array of page tables, one for each page directory entry */
+	struct amdgpu_vm_pt	*page_tables;
+
+	/* for id and flush management per ring */
+	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+};
+
+struct amdgpu_vm_manager {
+	struct amdgpu_fence		*active[AMDGPU_NUM_VM];
+	uint32_t			max_pfn;
+	/* number of VMIDs */
+	unsigned			nvm;
+	/* vram base address for page table entry  */
+	u64				vram_base_offset;
+	/* is vm enabled? */
+	bool				enabled;
+	/* for hw to save the PD addr on suspend/resume */
+	uint32_t			saved_table_addr[AMDGPU_NUM_VM];
+	/* vm pte handling */
+	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
+	struct amdgpu_ring                      *vm_pte_funcs_ring;
+};
+
+/*
+ * context related structures
+ */
+
+struct amdgpu_ctx_state {
+	uint64_t flags;
+	uint32_t hangs;
+};
+
+struct amdgpu_ctx {
+	/* call kref_get() before CS start and kref_put() after the CS fence is signaled */
+	struct kref refcount;
+	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_ctx_state state;
+	uint32_t id;
+	unsigned reset_counter;
+};
+
+struct amdgpu_ctx_mgr {
+	struct amdgpu_device *adev;
+	struct idr ctx_handles;
+	/* lock for IDR system */
+	struct mutex lock;
+};
+
+/*
+ * file private structure
+ */
+
+struct amdgpu_fpriv {
+	struct amdgpu_vm	vm;
+	struct mutex		bo_list_lock;
+	struct idr		bo_list_handles;
+	struct amdgpu_ctx_mgr ctx_mgr;
+};
+
+/*
+ * residency list
+ */
+
+struct amdgpu_bo_list {
+	struct mutex lock;
+	struct amdgpu_bo *gds_obj;
+	struct amdgpu_bo *gws_obj;
+	struct amdgpu_bo *oa_obj;
+	bool has_userptr;
+	unsigned num_entries;
+	struct amdgpu_bo_list_entry *array;
+};
+
+struct amdgpu_bo_list *
+amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
+
+/*
+ * GFX stuff
+ */
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc {
+	/* for power gating */
+	struct amdgpu_bo	*save_restore_obj;
+	uint64_t		save_restore_gpu_addr;
+	volatile uint32_t	*sr_ptr;
+	const u32               *reg_list;
+	u32                     reg_list_size;
+	/* for clear state */
+	struct amdgpu_bo	*clear_state_obj;
+	uint64_t		clear_state_gpu_addr;
+	volatile uint32_t	*cs_ptr;
+	const struct cs_section_def   *cs_data;
+	u32                     clear_state_size;
+	/* for cp tables */
+	struct amdgpu_bo	*cp_table_obj;
+	uint64_t		cp_table_gpu_addr;
+	volatile uint32_t	*cp_table_ptr;
+	u32                     cp_table_size;
+};
+
+struct amdgpu_mec {
+	struct amdgpu_bo	*hpd_eop_obj;
+	u64			hpd_eop_gpu_addr;
+	u32 num_pipe;
+	u32 num_mec;
+	u32 num_queue;
+};
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct amdgpu_scratch {
+	unsigned		num_reg;
+	uint32_t                reg_base;
+	bool			free[32];
+	uint32_t		reg[32];
+};
+
+/*
+ * GFX configurations
+ */
+struct amdgpu_gca_config {
+	unsigned max_shader_engines;
+	unsigned max_tile_pipes;
+	unsigned max_cu_per_sh;
+	unsigned max_sh_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_gs_threads;
+	unsigned max_hw_contexts;
+	unsigned sc_prim_fifo_size_frontend;
+	unsigned sc_prim_fifo_size_backend;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_tile_pipes;
+	unsigned backend_enable_mask;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+	unsigned mc_arb_ramcfg;
+	unsigned gb_addr_config;
+
+	uint32_t tile_mode_array[32];
+	uint32_t macrotile_mode_array[16];
+};
+
+struct amdgpu_gfx {
+	struct mutex			gpu_clock_mutex;
+	struct amdgpu_gca_config	config;
+	struct amdgpu_rlc		rlc;
+	struct amdgpu_mec		mec;
+	struct amdgpu_scratch		scratch;
+	const struct firmware		*me_fw;	/* ME firmware */
+	uint32_t			me_fw_version;
+	const struct firmware		*pfp_fw; /* PFP firmware */
+	uint32_t			pfp_fw_version;
+	const struct firmware		*ce_fw;	/* CE firmware */
+	uint32_t			ce_fw_version;
+	const struct firmware		*rlc_fw; /* RLC firmware */
+	uint32_t			rlc_fw_version;
+	const struct firmware		*mec_fw; /* MEC firmware */
+	uint32_t			mec_fw_version;
+	const struct firmware		*mec2_fw; /* MEC2 firmware */
+	uint32_t			mec2_fw_version;
+	uint32_t			me_feature_version;
+	uint32_t			ce_feature_version;
+	uint32_t			pfp_feature_version;
+	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
+	unsigned			num_gfx_rings;
+	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+	unsigned			num_compute_rings;
+	struct amdgpu_irq_src		eop_irq;
+	struct amdgpu_irq_src		priv_reg_irq;
+	struct amdgpu_irq_src		priv_inst_irq;
+	/* gfx status */
+	uint32_t gfx_current_status;
+	/* sync signal for const engine */
+	unsigned ce_sync_offs;
+	/* ce ram size */
+	unsigned ce_ram_size;
+};
+
+int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
+		  unsigned size, struct amdgpu_ib *ib);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
+int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
+		       struct amdgpu_ib *ib, void *owner);
+int amdgpu_ib_pool_init(struct amdgpu_device *adev);
+void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
+int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
+/* Ring access between begin & end cannot sleep */
+void amdgpu_ring_free_size(struct amdgpu_ring *ring);
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
+void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
+bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
+unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
+			    uint32_t **data);
+int amdgpu_ring_restore(struct amdgpu_ring *ring,
+			unsigned size, uint32_t *data);
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+		     unsigned ring_size, u32 nop, u32 align_mask,
+		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
+		     enum amdgpu_ring_type ring_type);
+void amdgpu_ring_fini(struct amdgpu_ring *ring);
+
+/*
+ * CS.
+ */
+struct amdgpu_cs_chunk {
+	uint32_t		chunk_id;
+	uint32_t		length_dw;
+	uint32_t		*kdata;
+	void __user		*user_ptr;
+};
+
+struct amdgpu_cs_parser {
+	struct amdgpu_device	*adev;
+	struct drm_file		*filp;
+	struct amdgpu_ctx	*ctx;
+	struct amdgpu_bo_list *bo_list;
+	/* chunks */
+	unsigned		nchunks;
+	struct amdgpu_cs_chunk	*chunks;
+	/* relocations */
+	struct amdgpu_bo_list_entry	*vm_bos;
+	struct list_head	validated;
+
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+
+	struct ww_acquire_ctx	ticket;
+
+	/* user fence */
+	struct amdgpu_user_fence uf;
+};
+
+static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
+{
+	return p->ibs[ib_idx].ptr[idx];
+}
+
+/*
+ * Writeback
+ */
+#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+
+struct amdgpu_wb {
+	struct amdgpu_bo	*wb_obj;
+	volatile uint32_t	*wb;
+	uint64_t		gpu_addr;
+	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
+	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+};
+
+int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
+
+/**
+ * struct amdgpu_pm - power management data
+ * It keeps track of various data needed to make power management decisions.
+ */
+
+enum amdgpu_pm_state_type {
+	/* not used for dpm */
+	POWER_STATE_TYPE_DEFAULT,
+	POWER_STATE_TYPE_POWERSAVE,
+	/* user selectable states */
+	POWER_STATE_TYPE_BATTERY,
+	POWER_STATE_TYPE_BALANCED,
+	POWER_STATE_TYPE_PERFORMANCE,
+	/* internal states */
+	POWER_STATE_TYPE_INTERNAL_UVD,
+	POWER_STATE_TYPE_INTERNAL_UVD_SD,
+	POWER_STATE_TYPE_INTERNAL_UVD_HD,
+	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
+	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
+	POWER_STATE_TYPE_INTERNAL_BOOT,
+	POWER_STATE_TYPE_INTERNAL_THERMAL,
+	POWER_STATE_TYPE_INTERNAL_ACPI,
+	POWER_STATE_TYPE_INTERNAL_ULV,
+	POWER_STATE_TYPE_INTERNAL_3DPERF,
+};
+
+enum amdgpu_int_thermal_type {
+	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_EXTERNAL,
+	THERMAL_TYPE_EXTERNAL_GPIO,
+	THERMAL_TYPE_RV6XX,
+	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
+	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
+	THERMAL_TYPE_SI,
+	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
+	THERMAL_TYPE_CI,
+	THERMAL_TYPE_KV,
+};
+
+enum amdgpu_dpm_auto_throttle_src {
+	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
+	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum amdgpu_dpm_event_src {
+	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
+	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
+	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
+	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
+};
+
+#define AMDGPU_MAX_VCE_LEVELS 6
+
+enum amdgpu_vce_level {
+	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
+	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
+	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
+	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
+	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+struct amdgpu_ps {
+	u32 caps; /* vbios flags */
+	u32 class; /* vbios flags */
+	u32 class2; /* vbios flags */
+	/* UVD clocks */
+	u32 vclk;
+	u32 dclk;
+	/* VCE clocks */
+	u32 evclk;
+	u32 ecclk;
+	bool vce_active;
+	enum amdgpu_vce_level vce_level;
+	/* asic priv */
+	void *ps_priv;
+};
+
+struct amdgpu_dpm_thermal {
+	/* thermal interrupt work */
+	struct work_struct work;
+	/* low temperature threshold */
+	int                min_temp;
+	/* high temperature threshold */
+	int                max_temp;
+	/* was last interrupt low to high or high to low */
+	bool               high_to_low;
+	/* interrupt source */
+	struct amdgpu_irq_src	irq;
+};
+
+enum amdgpu_clk_action
+{
+	AMDGPU_SCLK_UP = 1,
+	AMDGPU_SCLK_DOWN
+};
+
+struct amdgpu_blacklist_clocks
+{
+	u32 sclk;
+	u32 mclk;
+	enum amdgpu_clk_action action;
+};
+
+struct amdgpu_clock_and_voltage_limits {
+	u32 sclk;
+	u32 mclk;
+	u16 vddc;
+	u16 vddci;
+};
+
+struct amdgpu_clock_array {
+	u32 count;
+	u32 *values;
+};
+
+struct amdgpu_clock_voltage_dependency_entry {
+	u32 clk;
+	u16 v;
+};
+
+struct amdgpu_clock_voltage_dependency_table {
+	u32 count;
+	struct amdgpu_clock_voltage_dependency_entry *entries;
+};
+
+union amdgpu_cac_leakage_entry {
+	struct {
+		u16 vddc;
+		u32 leakage;
+	};
+	struct {
+		u16 vddc1;
+		u16 vddc2;
+		u16 vddc3;
+	};
+};
+
+struct amdgpu_cac_leakage_table {
+	u32 count;
+	union amdgpu_cac_leakage_entry *entries;
+};
+
+struct amdgpu_phase_shedding_limits_entry {
+	u16 voltage;
+	u32 sclk;
+	u32 mclk;
+};
+
+struct amdgpu_phase_shedding_limits_table {
+	u32 count;
+	struct amdgpu_phase_shedding_limits_entry *entries;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_entry {
+	u32 vclk;
+	u32 dclk;
+	u16 v;
+};
+
+struct amdgpu_uvd_clock_voltage_dependency_table {
+	u8 count;
+	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_entry {
+	u32 ecclk;
+	u32 evclk;
+	u16 v;
+};
+
+struct amdgpu_vce_clock_voltage_dependency_table {
+	u8 count;
+	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
+};
+
+struct amdgpu_ppm_table {
+	u8 ppm_design;
+	u16 cpu_core_number;
+	u32 platform_tdp;
+	u32 small_ac_platform_tdp;
+	u32 platform_tdc;
+	u32 small_ac_platform_tdc;
+	u32 apu_tdp;
+	u32 dgpu_tdp;
+	u32 dgpu_ulv_power;
+	u32 tj_max;
+};
+
+struct amdgpu_cac_tdp_table {
+	u16 tdp;
+	u16 configurable_tdp;
+	u16 tdc;
+	u16 battery_power_limit;
+	u16 small_power_limit;
+	u16 low_cac_leakage;
+	u16 high_cac_leakage;
+	u16 maximum_power_delivery_limit;
+};
+
+struct amdgpu_dpm_dynamic_state {
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
+	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
+	struct amdgpu_clock_array valid_sclk_values;
+	struct amdgpu_clock_array valid_mclk_values;
+	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
+	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
+	u32 mclk_sclk_ratio;
+	u32 sclk_mclk_delta;
+	u16 vddc_vddci_delta;
+	u16 min_vddc_for_pcie_gen2;
+	struct amdgpu_cac_leakage_table cac_leakage_table;
+	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
+	struct amdgpu_ppm_table *ppm_table;
+	struct amdgpu_cac_tdp_table *cac_tdp_table;
+};
+
+struct amdgpu_dpm_fan {
+	u16 t_min;
+	u16 t_med;
+	u16 t_high;
+	u16 pwm_min;
+	u16 pwm_med;
+	u16 pwm_high;
+	u8 t_hyst;
+	u32 cycle_delay;
+	u16 t_max;
+	u8 control_mode;
+	u16 default_max_fan_pwm;
+	u16 default_fan_output_sensitivity;
+	u16 fan_output_sensitivity;
+	bool ucode_fan_control;
+};
+
+enum amdgpu_pcie_gen {
+	AMDGPU_PCIE_GEN1 = 0,
+	AMDGPU_PCIE_GEN2 = 1,
+	AMDGPU_PCIE_GEN3 = 2,
+	AMDGPU_PCIE_GEN_INVALID = 0xffff
+};
+
+enum amdgpu_dpm_forced_level {
+	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
+	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
+	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+};
+
+struct amdgpu_vce_state {
+	/* vce clocks */
+	u32 evclk;
+	u32 ecclk;
+	/* gpu clocks */
+	u32 sclk;
+	u32 mclk;
+	u8 clk_idx;
+	u8 pstate;
+};
+
+struct amdgpu_dpm_funcs {
+	int (*get_temperature)(struct amdgpu_device *adev);
+	int (*pre_set_power_state)(struct amdgpu_device *adev);
+	int (*set_power_state)(struct amdgpu_device *adev);
+	void (*post_set_power_state)(struct amdgpu_device *adev);
+	void (*display_configuration_changed)(struct amdgpu_device *adev);
+	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
+	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
+	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
+	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
+	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
+	bool (*vblank_too_short)(struct amdgpu_device *adev);
+	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
+	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
+	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
+	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
+	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
+	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+};
+
+struct amdgpu_dpm {
+	struct amdgpu_ps        *ps;
+	/* number of valid power states */
+	int                     num_ps;
+	/* current power state that is active */
+	struct amdgpu_ps        *current_ps;
+	/* requested power state */
+	struct amdgpu_ps        *requested_ps;
+	/* boot up power state */
+	struct amdgpu_ps        *boot_ps;
+	/* default uvd power state */
+	struct amdgpu_ps        *uvd_ps;
+	/* vce requirements */
+	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
+	enum amdgpu_vce_level vce_level;
+	enum amdgpu_pm_state_type state;
+	enum amdgpu_pm_state_type user_state;
+	u32                     platform_caps;
+	u32                     voltage_response_time;
+	u32                     backbias_response_time;
+	void                    *priv;
+	u32			new_active_crtcs;
+	int			new_active_crtc_count;
+	u32			current_active_crtcs;
+	int			current_active_crtc_count;
+	struct amdgpu_dpm_dynamic_state dyn_state;
+	struct amdgpu_dpm_fan fan;
+	u32 tdp_limit;
+	u32 near_tdp_limit;
+	u32 near_tdp_limit_adjusted;
+	u32 sq_ramping_threshold;
+	u32 cac_leakage;
+	u16 tdp_od_limit;
+	u32 tdp_adjustment;
+	u16 load_line_slope;
+	bool power_control;
+	bool ac_power;
+	/* special states active */
+	bool                    thermal_active;
+	bool                    uvd_active;
+	bool                    vce_active;
+	/* thermal handling */
+	struct amdgpu_dpm_thermal thermal;
+	/* forced levels */
+	enum amdgpu_dpm_forced_level forced_level;
+};
+
+struct amdgpu_pm {
+	struct mutex		mutex;
+	u32                     current_sclk;
+	u32                     current_mclk;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	struct amdgpu_i2c_chan *i2c_bus;
+	/* internal thermal controller on rv6xx+ */
+	enum amdgpu_int_thermal_type int_thermal_type;
+	struct device	        *int_hwmon_dev;
+	/* fan control parameters */
+	bool                    no_fan;
+	u8                      fan_pulses_per_revolution;
+	u8                      fan_min_rpm;
+	u8                      fan_max_rpm;
+	/* dpm */
+	bool                    dpm_enabled;
+	struct amdgpu_dpm       dpm;
+	const struct firmware	*fw;	/* SMC firmware */
+	uint32_t                fw_version;
+	const struct amdgpu_dpm_funcs *funcs;
+};
+
+/*
+ * UVD
+ */
+#define AMDGPU_MAX_UVD_HANDLES	10
+#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
+#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+
+struct amdgpu_uvd {
+	struct amdgpu_bo	*vcpu_bo;
+	void			*cpu_addr;
+	uint64_t		gpu_addr;
+	void			*saved_bo;
+	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
+	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
+	struct delayed_work	idle_work;
+	const struct firmware	*fw;	/* UVD firmware */
+	struct amdgpu_ring	ring;
+	struct amdgpu_irq_src	irq;
+	bool			address_64_bit;
+};
+
+/*
+ * VCE
+ */
+#define AMDGPU_MAX_VCE_HANDLES	16
+#define AMDGPU_VCE_FIRMWARE_OFFSET 256
+
+struct amdgpu_vce {
+	struct amdgpu_bo	*vcpu_bo;
+	uint64_t		gpu_addr;
+	unsigned		fw_version;
+	unsigned		fb_version;
+	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
+	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
+	struct delayed_work	idle_work;
+	const struct firmware	*fw;	/* VCE firmware */
+	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
+	struct amdgpu_irq_src	irq;
+};
+
+/*
+ * SDMA
+ */
+struct amdgpu_sdma {
+	/* SDMA firmware */
+	const struct firmware	*fw;
+	uint32_t		fw_version;
+
+	struct amdgpu_ring	ring;
+};
+
+/*
+ * Firmware
+ */
+struct amdgpu_firmware {
+	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
+	bool smu_load;
+	struct amdgpu_bo *fw_buf;
+	unsigned int fw_size;
+};
+
+/*
+ * Benchmarking
+ */
+void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
+
+
+/*
+ * Testing
+ */
+void amdgpu_test_moves(struct amdgpu_device *adev);
+void amdgpu_test_ring_sync(struct amdgpu_device *adev,
+			   struct amdgpu_ring *cpA,
+			   struct amdgpu_ring *cpB);
+void amdgpu_test_syncing(struct amdgpu_device *adev);
+
+/*
+ * MMU Notifier
+ */
+#if defined(CONFIG_MMU_NOTIFIER)
+int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+#else
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+	return -ENODEV;
+}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+#endif
+
+/*
+ * Debugfs
+ */
+struct amdgpu_debugfs {
+	struct drm_info_list	*files;
+	unsigned		num_files;
+};
+
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+			     struct drm_info_list *files,
+			     unsigned nfiles);
+int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+
+#if defined(CONFIG_DEBUG_FS)
+int amdgpu_debugfs_init(struct drm_minor *minor);
+void amdgpu_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+/*
+ * amdgpu smumgr functions
+ */
+struct amdgpu_smumgr_funcs {
+	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
+	int (*request_smu_load_fw)(struct amdgpu_device *adev);
+	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
+};
+
+/*
+ * amdgpu smumgr
+ */
+struct amdgpu_smumgr {
+	struct amdgpu_bo *toc_buf;
+	struct amdgpu_bo *smu_buf;
+	/* asic priv smu data */
+	void *priv;
+	spinlock_t smu_lock;
+	/* smumgr functions */
+	const struct amdgpu_smumgr_funcs *smumgr_funcs;
+	/* ucode loading complete flag */
+	uint32_t fw_flags;
+};
+
+/*
+ * ASIC specific register table accessible by UMD
+ */
+struct amdgpu_allowed_register_entry {
+	uint32_t reg_offset;
+	bool untouched;
+	bool grbm_indexed;
+};
+
+struct amdgpu_cu_info {
+	uint32_t number; /* total active CU number */
+	uint32_t ao_cu_mask;
+	uint32_t bitmap[4][4];
+};
+
+
+/*
+ * ASIC specific functions.
+ */
+struct amdgpu_asic_funcs {
+	bool (*read_disabled_bios)(struct amdgpu_device *adev);
+	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
+			     u32 sh_num, u32 reg_offset, u32 *value);
+	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
+	int (*reset)(struct amdgpu_device *adev);
+	/* wait for mc_idle */
+	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
+	/* get the reference clock */
+	u32 (*get_xclk)(struct amdgpu_device *adev);
+	/* get the gpu clock counter */
+	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
+	/* MM block clocks */
+	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
+	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+};
+
+/*
+ * IOCTL.
+ */
+int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp);
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp);
+int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp);
+int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp);
+int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+/* VRAM scratch page for HDP bug, default vram page */
+struct amdgpu_vram_scratch {
+	struct amdgpu_bo		*robj;
+	volatile uint32_t		*ptr;
+	u64				gpu_addr;
+};
+
+/*
+ * ACPI
+ */
+struct amdgpu_atif_notification_cfg {
+	bool enabled;
+	int command_code;
+};
+
+struct amdgpu_atif_notifications {
+	bool display_switch;
+	bool expansion_mode_change;
+	bool thermal_state;
+	bool forced_power_state;
+	bool system_power_state;
+	bool display_conf_change;
+	bool px_gfx_switch;
+	bool brightness_change;
+	bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+	bool system_params;
+	bool sbios_requests;
+	bool select_active_disp;
+	bool lid_state;
+	bool get_tv_standard;
+	bool set_tv_standard;
+	bool get_panel_expansion_mode;
+	bool set_panel_expansion_mode;
+	bool temperature_change;
+	bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+	struct amdgpu_atif_notifications notifications;
+	struct amdgpu_atif_functions functions;
+	struct amdgpu_atif_notification_cfg notification_cfg;
+	struct amdgpu_encoder *encoder_for_bl;
+};
+
+struct amdgpu_atcs_functions {
+	bool get_ext_state;
+	bool pcie_perf_req;
+	bool pcie_dev_rdy;
+	bool pcie_bus_width;
+};
+
+struct amdgpu_atcs {
+	struct amdgpu_atcs_functions functions;
+};
+
+int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
+		     uint32_t *id, uint32_t flags);
+int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
+						  uint32_t id);
+
+void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+						 struct drm_file *filp);
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
+typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
+
+typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
+typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+struct amdgpu_device {
+	struct device			*dev;
+	struct drm_device		*ddev;
+	struct pci_dev			*pdev;
+	struct rw_semaphore		exclusive_lock;
+
+	/* ASIC */
+	enum amdgpu_asic_type           asic_type;
+	uint32_t			family;
+	uint32_t			rev_id;
+	uint32_t			external_rev_id;
+	unsigned long			flags;
+	int				usec_timeout;
+	const struct amdgpu_asic_funcs	*asic_funcs;
+	bool				shutdown;
+	bool				suspend;
+	bool				need_dma32;
+	bool				accel_working;
+	bool				needs_reset;
+	struct work_struct 		reset_work;
+	struct notifier_block		acpi_nb;
+	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
+	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
+	unsigned 			debugfs_count;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry			*debugfs_regs;
+#endif
+	struct amdgpu_atif		atif;
+	struct amdgpu_atcs		atcs;
+	struct mutex			srbm_mutex;
+	/* GRBM index mutex. Protects concurrent access to GRBM index */
+	struct mutex                    grbm_idx_mutex;
+	struct dev_pm_domain		vga_pm_domain;
+	bool				have_disp_power_ref;
+
+	/* BIOS */
+	uint8_t				*bios;
+	bool				is_atom_bios;
+	uint16_t			bios_header_start;
+	struct amdgpu_bo		*stollen_vga_memory;
+	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
+
+	/* Register/doorbell mmio */
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	void __iomem			*rmmio;
+	/* protects concurrent MM_INDEX/DATA based register access */
+	spinlock_t mmio_idx_lock;
+	/* protects concurrent SMC based register access */
+	spinlock_t smc_idx_lock;
+	amdgpu_rreg_t			smc_rreg;
+	amdgpu_wreg_t			smc_wreg;
+	/* protects concurrent PCIE register access */
+	spinlock_t pcie_idx_lock;
+	amdgpu_rreg_t			pcie_rreg;
+	amdgpu_wreg_t			pcie_wreg;
+	/* protects concurrent UVD register access */
+	spinlock_t uvd_ctx_idx_lock;
+	amdgpu_rreg_t			uvd_ctx_rreg;
+	amdgpu_wreg_t			uvd_ctx_wreg;
+	/* protects concurrent DIDT register access */
+	spinlock_t didt_idx_lock;
+	amdgpu_rreg_t			didt_rreg;
+	amdgpu_wreg_t			didt_wreg;
+	/* protects concurrent ENDPOINT (audio) register access */
+	spinlock_t audio_endpt_idx_lock;
+	amdgpu_block_rreg_t		audio_endpt_rreg;
+	amdgpu_block_wreg_t		audio_endpt_wreg;
+	void __iomem                    *rio_mem;
+	resource_size_t			rio_mem_size;
+	struct amdgpu_doorbell		doorbell;
+
+	/* clock/pll info */
+	struct amdgpu_clock            clock;
+
+	/* MC */
+	struct amdgpu_mc		mc;
+	struct amdgpu_gart		gart;
+	struct amdgpu_dummy_page	dummy_page;
+	struct amdgpu_vm_manager	vm_manager;
+
+	/* memory management */
+	struct amdgpu_mman		mman;
+	struct amdgpu_gem		gem;
+	struct amdgpu_vram_scratch	vram_scratch;
+	struct amdgpu_wb		wb;
+	atomic64_t			vram_usage;
+	atomic64_t			vram_vis_usage;
+	atomic64_t			gtt_usage;
+	atomic64_t			num_bytes_moved;
+	atomic_t			gpu_reset_counter;
+
+	/* display */
+	struct amdgpu_mode_info		mode_info;
+	struct work_struct		hotplug_work;
+	struct amdgpu_irq_src		crtc_irq;
+	struct amdgpu_irq_src		pageflip_irq;
+	struct amdgpu_irq_src		hpd_irq;
+
+	/* rings */
+	wait_queue_head_t		fence_queue;
+	unsigned			fence_context;
+	struct mutex			ring_lock;
+	unsigned			num_rings;
+	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
+	bool				ib_pool_ready;
+	struct amdgpu_sa_manager	ring_tmp_bo;
+
+	/* interrupts */
+	struct amdgpu_irq		irq;
+
+	/* dpm */
+	struct amdgpu_pm		pm;
+	u32				cg_flags;
+	u32				pg_flags;
+
+	/* amdgpu smumgr */
+	struct amdgpu_smumgr smu;
+
+	/* gfx */
+	struct amdgpu_gfx		gfx;
+
+	/* sdma */
+	struct amdgpu_sdma		sdma[2];
+	struct amdgpu_irq_src		sdma_trap_irq;
+	struct amdgpu_irq_src		sdma_illegal_inst_irq;
+
+	/* uvd */
+	bool				has_uvd;
+	struct amdgpu_uvd		uvd;
+
+	/* vce */
+	struct amdgpu_vce		vce;
+
+	/* firmwares */
+	struct amdgpu_firmware		firmware;
+
+	/* GDS */
+	struct amdgpu_gds		gds;
+
+	const struct amdgpu_ip_block_version *ip_blocks;
+	int				num_ip_blocks;
+	bool				*ip_block_enabled;
+	struct mutex	mn_lock;
+	DECLARE_HASHTABLE(mn_hash, 7);
+
+	/* tracking pinned memory */
+	u64 vram_pin_size;
+	u64 gart_pin_size;
+};
+
+bool amdgpu_device_is_px(struct drm_device *dev);
+int amdgpu_device_init(struct amdgpu_device *adev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev,
+		       uint32_t flags);
+void amdgpu_device_fini(struct amdgpu_device *adev);
+int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+
+uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+			bool always_indirect);
+void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    bool always_indirect);
+u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
+void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+
+/*
+ * Cast helper
+ */
+extern const struct fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+{
+	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+	if (__f->base.ops == &amdgpu_fence_ops)
+		return __f;
+
+	return NULL;
+}
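A note on usage, since the NULL return is easy to miss: to_amdgpu_fence() is how the driver tells its own fences apart from foreign ones, so callers are expected to test the result before touching amdgpu_fence fields. A minimal sketch follows; the ring back-pointer is assumed from the amdgpu_fence declaration earlier in this header, and the helper name is made up for illustration only.

static bool example_fence_is_from_ring(struct fence *f,
				       struct amdgpu_ring *ring)
{
	struct amdgpu_fence *af = to_amdgpu_fence(f);

	/* NULL means the fence was not created by amdgpu at all */
	return af && af->ring == ring;
}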
+
+/*
+ * Registers read & write functions.
+ */
+#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
+#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
+#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
+#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
+#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
+#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
+#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
+#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
+#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
+#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
+#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
+#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
+#define WREG32_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32(reg);			\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32(reg, tmp_);				\
+	} while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+#define WREG32_PLL_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32_PLL(reg);		\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32_PLL(reg, tmp_);				\
+	} while (0)
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
+#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+
+#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
+	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
+	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
+
+#define REG_GET_FIELD(value, reg, field)				\
+	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
+
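For readers new to the reg##__##field naming scheme: REG_SET_FIELD()/REG_GET_FIELD() rely on the auto-generated <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK defines, while WREG32_P() keeps the register bits that are set in the mask and takes the remaining bits from the supplied value. A small illustrative sketch; GRBM_GFX_INDEX/INSTANCE_INDEX are assumed example names from the generated register headers, and the function itself is not part of this patch.

static void example_select_instance(struct amdgpu_device *adev, u32 instance)
{
	u32 tmp = RREG32(mmGRBM_GFX_INDEX);

	/* rewrite just the INSTANCE_INDEX field, leaving the rest intact */
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
	WREG32(mmGRBM_GFX_INDEX, tmp);

	/* and REG_GET_FIELD() extracts the same field again */
	instance = REG_GET_FIELD(tmp, GRBM_GFX_INDEX, INSTANCE_INDEX);
}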
+/*
+ * BIOS helpers.
+ */
+#define RBIOS8(i) (adev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
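These helpers read little-endian values straight out of the copied VBIOS image. As a hedged example of the intended use: the 0x48 offset is the conventional location of the ATOM ROM header pointer, but treat both the offset and the helper below as illustrative, not as part of this patch.

static u16 example_bios_header_start(struct amdgpu_device *adev)
{
	/* assumption: ATOM images store the ROM header offset at byte 0x48 */
	return RBIOS16(0x48);
}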
+
+/*
+ * RING helpers.
+ */
+static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
+{
+	if (ring->count_dw <= 0)
+		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
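amdgpu_ring_write() only copies one dword and advances the write pointer; space accounting (count_dw) has to be reserved beforehand, and the hardware wptr/doorbell update happens on commit. A hedged sketch of the expected emit sequence, assuming ring lock/commit helpers along the lines of the radeon lineage (amdgpu_ring_lock()/amdgpu_ring_unlock_commit()); the function and packet value here are illustrative only.

static void example_emit_one_dword(struct amdgpu_ring *ring, uint32_t pkt)
{
	/* reserve one dword; on failure nothing has been written */
	if (amdgpu_ring_lock(ring, 1))
		return;

	amdgpu_ring_write(ring, pkt);		/* pre-built packet dword */
	amdgpu_ring_unlock_commit(ring);	/* publishes the new wptr */
}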
+
+/*
+ * ASICs macro.
+ */
+#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
+#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
+#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
+#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
+#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
+#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
+#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
+#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
+#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
+#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
+#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
+#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
+#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
+#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
+#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
+#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
+#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
+#define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit))
+#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
+#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
+#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
+#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
+#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
+#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
+#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
+#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
+#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
+#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
+#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
+#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
+#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
+#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
+#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
+#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
+#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
+#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
+#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
+#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
+#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
+#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
+#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
+#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
+#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
+#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
+#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
+#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
+#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
+#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
+#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
+#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
+#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
+
+#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
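All of the amdgpu_asic_*/amdgpu_ring_*/amdgpu_dpm_* macros above are thin dispatchers into the per-IP function tables and assume the corresponding callback is wired up. Where a callback is optional, a caller would normally test the pointer first; a small hedged sketch, with a hypothetical helper that is not part of this patch.

static uint64_t example_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->asic_funcs && adev->asic_funcs->get_gpu_clock_counter)
		return amdgpu_asic_get_gpu_clock_counter(adev);

	return 0;	/* callback not implemented on this ASIC */
}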
+
+/* Common functions */
+int amdgpu_gpu_reset(struct amdgpu_device *adev);
+void amdgpu_pci_config_reset(struct amdgpu_device *adev);
+bool amdgpu_card_posted(struct amdgpu_device *adev);
+void amdgpu_update_display_priority(struct amdgpu_device *adev);
+bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
+int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
+		       u32 ip_instance, u32 ring,
+		       struct amdgpu_ring **out_ring);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+				     uint32_t flags);
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+				 struct ttm_mem_reg *mem);
+void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
+void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+					     const u32 *registers,
+					     const u32 array_size);
+
+bool amdgpu_device_is_px(struct drm_device *dev);
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void amdgpu_register_atpx_handler(void);
+void amdgpu_unregister_atpx_handler(void);
+#else
+static inline void amdgpu_register_atpx_handler(void) {}
+static inline void amdgpu_unregister_atpx_handler(void) {}
+#endif
+
+/*
+ * KMS
+ */
+extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
+extern int amdgpu_max_kms_ioctl;
+
+int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int amdgpu_driver_unload_kms(struct drm_device *dev);
+void amdgpu_driver_lastclose_kms(struct drm_device *dev);
+int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void amdgpu_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv);
+void amdgpu_driver_preclose_kms(struct drm_device *dev,
+				struct drm_file *file_priv);
+int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
+int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
+void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags);
+long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg);
+
+/*
+ * vm
+ */
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					  struct amdgpu_vm *vm,
+					  struct list_head *head);
+struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
+				       struct amdgpu_vm *vm);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct amdgpu_fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct amdgpu_fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm, struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+
+/*
+ * functions used by amdgpu_encoder.c
+ */
+struct amdgpu_afmt_acr {
+	u32 clock;
+
+	int n_32khz;
+	int cts_32khz;
+
+	int n_44_1khz;
+	int cts_44_1khz;
+
+	int n_48khz;
+	int cts_48khz;
+
+};
+
+struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
+
+/* amdgpu_acpi.c */
+#if defined(CONFIG_ACPI)
+int amdgpu_acpi_init(struct amdgpu_device *adev);
+void amdgpu_acpi_fini(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
+int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
+						u8 perf_req, bool advertise);
+int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+#else
+static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
+static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+#endif
+
+struct amdgpu_bo_va_mapping *
+amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+		       uint64_t addr, struct amdgpu_bo **bo);
+
+#include "amdgpu_object.h"
+
+#endif

+ 768 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c

@@ -0,0 +1,768 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/video.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "amdgpu.h"
+#include "amdgpu_acpi.h"
+#include "atom.h"
+
+#define ACPI_AC_CLASS           "ac_adapter"
+
+extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
+
+struct atif_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 notification_mask;	/* supported notifications mask */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_mask;		/* valid flags mask */
+	u32 flags;		/* flags */
+	u8 command_code;	/* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 pending;		/* pending sbios requests */
+	u8 panel_exp_mode;	/* panel expansion mode */
+	u8 thermal_gfx;		/* thermal state: target gfx controller */
+	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
+	u8 forced_power_gfx;	/* forced power state: target gfx controller */
+	u8 forced_power_state;	/* forced power state: state id */
+	u8 system_power_src;	/* system power source */
+	u8 backlight_level;	/* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK	0x3
+#define ATIF_NOTIFY_NONE	0
+#define ATIF_NOTIFY_81		1
+#define ATIF_NOTIFY_N		2
+
+struct atcs_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+#define ATCS_VALID_FLAGS_MASK	0x3
+
+struct atcs_pref_req_input {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 client_id;		/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+	u16 valid_flags_mask;	/* valid flags mask */
+	u16 flags;		/* flags */
+	u8 req_type;		/* request type */
+	u8 perf_req;		/* performance request */
+} __packed;
+
+struct atcs_pref_req_output {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u8 ret_val;		/* return value */
+} __packed;
+
+/* Call the ATIF method
+ */
+/**
+ * amdgpu_atif_call - call an ATIF method
+ *
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
+		struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atif_arg_elements[2];
+	struct acpi_object_list atif_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atif_arg.count = 2;
+	atif_arg.pointer = &atif_arg_elements[0];
+
+	atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atif_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atif_arg_elements[1].buffer.length = params->length;
+		atif_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atif_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATIF is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * amdgpu_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
+{
+	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * amdgpu_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mask)
+{
+	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * amdgpu_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: amdgpu atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_verify_interface(acpi_handle handle,
+		struct amdgpu_atif *atif)
+{
+	union acpi_object *info;
+	struct atif_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 12) {
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+	amdgpu_atif_parse_notification(&atif->notifications, output.notification_mask);
+	amdgpu_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * amdgpu_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics).  This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_get_notification_params(acpi_handle handle,
+		struct amdgpu_atif_notification_cfg *n)
+{
+	union acpi_object *info;
+	struct atif_system_params params;
+	size_t size;
+	int err = 0;
+
+	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&params, 0, sizeof(params));
+	size = min(sizeof(params), size);
+	memcpy(&params, info->buffer.pointer, size);
+
+	DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+			params.valid_mask, params.flags);
+	params.flags = params.flags & params.valid_mask;
+
+	if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+		n->enabled = false;
+		n->command_code = 0;
+	} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+		n->enabled = true;
+		n->command_code = 0x81;
+	} else {
+		if (size < 11) {
+			err = -EINVAL;
+			goto out;
+		}
+		n->enabled = true;
+		n->command_code = params.command_code;
+	}
+
+out:
+	DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+			(n->enabled ? "enabled" : "disabled"),
+			n->command_code);
+	kfree(info);
+	return err;
+}
+
+/**
+ * amdgpu_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
+		struct atif_sbios_requests *req)
+{
+	union acpi_object *info;
+	size_t size;
+	int count = 0;
+
+	info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+	if (!info)
+		return -EIO;
+
+	size = *(u16 *)info->buffer.pointer;
+	if (size < 0xd) {
+		count = -EINVAL;
+		goto out;
+	}
+	memset(req, 0, sizeof(*req));
+
+	size = min(sizeof(*req), size);
+	memcpy(req, info->buffer.pointer, size);
+	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+	count = hweight32(req->pending);
+
+out:
+	kfree(info);
+	return count;
+}
+
+/**
+ * amdgpu_atif_handler - handle ATIF notify requests
+ *
+ * @adev: amdgpu_device pointer
+ * @event: acpi bus event
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+int amdgpu_atif_handler(struct amdgpu_device *adev,
+			struct acpi_bus_event *event)
+{
+	struct amdgpu_atif *atif = &adev->atif;
+	struct atif_sbios_requests req;
+	acpi_handle handle;
+	int count;
+
+	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+			event->device_class, event->type);
+
+	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+		return NOTIFY_DONE;
+
+	if (!atif->notification_cfg.enabled ||
+	    event->type != atif->notification_cfg.command_code)
+		/* Not our event */
+		return NOTIFY_DONE;
+
+	/* Check pending SBIOS requests */
+	handle = ACPI_HANDLE(&adev->pdev->dev);
+	count = amdgpu_atif_get_sbios_requests(handle, &req);
+
+	if (count <= 0)
+		return NOTIFY_DONE;
+
+	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+		struct amdgpu_encoder *enc = atif->encoder_for_bl;
+
+		if (enc) {
+			struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+					req.backlight_level);
+
+			amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+			backlight_force_update(dig->bl_dev,
+					       BACKLIGHT_UPDATE_HOTKEY);
+#endif
+		}
+	}
+	/* TODO: check other events */
+
+	/* We've handled the event, stop the notifier chain. The ACPI interface
+	 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
+	 * userspace if the event was generated only to signal a SBIOS
+	 * request.
+	 */
+	return NOTIFY_BAD;
+}
+
+/* Call the ATCS method
+ */
+/**
+ * amdgpu_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atcs_arg_elements[2];
+	struct acpi_object_list atcs_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atcs_arg.count = 2;
+	atcs_arg.pointer = &atcs_arg_elements[0];
+
+	atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atcs_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atcs_arg_elements[1].buffer.length = params->length;
+		atcs_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atcs_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATCS is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * amdgpu_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mask)
+{
+	f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * amdgpu_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: amdgpu atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int amdgpu_atcs_verify_interface(acpi_handle handle,
+					struct amdgpu_atcs *atcs)
+{
+	union acpi_object *info;
+	struct atcs_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+	amdgpu_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * amdgpu_acpi_is_pcie_performance_request_supported
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
+ * are supported (all asics).
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
+{
+	struct amdgpu_atcs *atcs = &adev->atcs;
+
+	if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
+		return true;
+
+	return false;
+}
+
+/**
+ * amdgpu_acpi_pcie_notify_device_ready
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Executes the PCIE_DEVICE_READY_NOTIFICATION method
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
+{
+	acpi_handle handle;
+	union acpi_object *info;
+	struct amdgpu_atcs *atcs = &adev->atcs;
+
+	/* Get the device handle */
+	handle = ACPI_HANDLE(&adev->pdev->dev);
+	if (!handle)
+		return -EINVAL;
+
+	if (!atcs->functions.pcie_dev_rdy)
+		return -EINVAL;
+
+	info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+	if (!info)
+		return -EIO;
+
+	kfree(info);
+
+	return 0;
+}
+
+/**
+ * amdgpu_acpi_pcie_performance_request
+ *
+ * @adev: amdgpu_device pointer
+ * @perf_req: requested perf level (pcie gen speed)
+ * @advertise: set advertise caps flag if set
+ *
+ * Executes the PCIE_PERFORMANCE_REQUEST method to
+ * change the pcie gen speed (all asics).
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
+					 u8 perf_req, bool advertise)
+{
+	acpi_handle handle;
+	union acpi_object *info;
+	struct amdgpu_atcs *atcs = &adev->atcs;
+	struct atcs_pref_req_input atcs_input;
+	struct atcs_pref_req_output atcs_output;
+	struct acpi_buffer params;
+	size_t size;
+	u32 retry = 3;
+
+	/* Get the device handle */
+	handle = ACPI_HANDLE(&adev->pdev->dev);
+	if (!handle)
+		return -EINVAL;
+
+	if (!atcs->functions.pcie_perf_req)
+		return -EINVAL;
+
+	atcs_input.size = sizeof(struct atcs_pref_req_input);
+	/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+	atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+	atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
+	atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
+	if (advertise)
+		atcs_input.flags |= ATCS_ADVERTISE_CAPS;
+	atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
+	atcs_input.perf_req = perf_req;
+
+	params.length = sizeof(struct atcs_pref_req_input);
+	params.pointer = &atcs_input;
+
+	while (retry--) {
+		info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+		if (!info)
+			return -EIO;
+
+		memset(&atcs_output, 0, sizeof(atcs_output));
+
+		size = *(u16 *) info->buffer.pointer;
+		if (size < 3) {
+			DRM_INFO("ATCS buffer is too small: %zu\n", size);
+			kfree(info);
+			return -EINVAL;
+		}
+		size = min(sizeof(atcs_output), size);
+
+		memcpy(&atcs_output, info->buffer.pointer, size);
+
+		kfree(info);
+
+		switch (atcs_output.ret_val) {
+		case ATCS_REQUEST_REFUSED:
+		default:
+			return -EINVAL;
+		case ATCS_REQUEST_COMPLETE:
+			return 0;
+		case ATCS_REQUEST_IN_PROGRESS:
+			udelay(10);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: notifier event value
+ * @data: acpi event
+ *
+ * Calls relevant amdgpu functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int amdgpu_acpi_event(struct notifier_block *nb,
+			     unsigned long val,
+			     void *data)
+{
+	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, acpi_nb);
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+		if (power_supply_is_system_supplied() > 0)
+			DRM_DEBUG_DRIVER("pm: AC\n");
+		else
+			DRM_DEBUG_DRIVER("pm: DC\n");
+
+		amdgpu_pm_acpi_event_handler(adev);
+	}
+
+	/* Check for pending SBIOS requests */
+	return amdgpu_atif_handler(adev, entry);
+}
+
+/* Call all ACPI methods here */
+/**
+ * amdgpu_acpi_init - init driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_init(struct amdgpu_device *adev)
+{
+	acpi_handle handle;
+	struct amdgpu_atif *atif = &adev->atif;
+	struct amdgpu_atcs *atcs = &adev->atcs;
+	int ret;
+
+	/* Get the device handle */
+	handle = ACPI_HANDLE(&adev->pdev->dev);
+
+	if (!adev->bios || !handle)
+		return 0;
+
+	/* Call the ATCS method */
+	ret = amdgpu_atcs_verify_interface(handle, atcs);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+	}
+
+	/* Call the ATIF method */
+	ret = amdgpu_atif_verify_interface(handle, atif);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+		goto out;
+	}
+
+	if (atif->notifications.brightness_change) {
+		struct drm_encoder *tmp;
+
+		/* Find the encoder controlling the brightness */
+		list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+				head) {
+			struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
+
+			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+			    enc->enc_priv) {
+				if (adev->is_atom_bios) {
+					struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						atif->encoder_for_bl = enc;
+						break;
+					}
+				}
+			}
+		}
+	}
+
+	if (atif->functions.sbios_requests && !atif->functions.system_params) {
+		/* XXX check this workaround, if sbios request function is
+		 * present we have to see how it's configured in the system
+		 * params
+		 */
+		atif->functions.system_params = true;
+	}
+
+	if (atif->functions.system_params) {
+		ret = amdgpu_atif_get_notification_params(handle,
+				&atif->notification_cfg);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+					ret);
+			/* Disable notification */
+			atif->notification_cfg.enabled = false;
+		}
+	}
+
+out:
+	adev->acpi_nb.notifier_call = amdgpu_acpi_event;
+	register_acpi_notifier(&adev->acpi_nb);
+
+	return ret;
+}
+
+/**
+ * amdgpu_acpi_fini - tear down driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void amdgpu_acpi_fini(struct amdgpu_device *adev)
+{
+	unregister_acpi_notifier(&adev->acpi_nb);
+}
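For context, amdgpu_acpi_init()/amdgpu_acpi_fini() are designed to be optional: init returns 0 when there is no usable ACPI handle or BIOS image, and the CONFIG_ACPI=n stubs in amdgpu.h behave the same way. A hedged sketch of how the device core is expected to consume them; the call sites are an assumption here, not shown in this hunk.

static void example_device_bringup_acpi(struct amdgpu_device *adev)
{
	int r = amdgpu_acpi_init(adev);

	if (r)
		dev_dbg(adev->dev, "amdgpu: ACPI init failed: %d\n", r);
	/* failure is non-fatal; the notifier simply stays unused */
}

static void example_device_teardown_acpi(struct amdgpu_device *adev)
{
	amdgpu_acpi_fini(adev);
}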

+ 445 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.h

@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_ACPI_H
+#define AMDGPU_ACPI_H
+
+struct amdgpu_device;
+struct acpi_bus_event;
+
+int amdgpu_atif_handler(struct amdgpu_device *adev,
+		struct acpi_bus_event *event);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
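All four methods share the calling convention spelled out above: ARG0 is an integer function code, ARG1 a parameter buffer, and the result comes back as an ACPI buffer object that the caller frees. A consolidated sketch of that convention, assuming it mirrors the amdgpu_atif_call()/amdgpu_atcs_call() helpers in amdgpu_acpi.c; the unified helper itself is illustrative, not part of this patch.

static union acpi_object *example_amd_acpi_call(acpi_handle handle,
						char *method, /* "ATIF", "ATCS", ... */
						int function,
						struct acpi_buffer *params)
{
	union acpi_object args[2];
	struct acpi_object_list arg_list = { .count = 2, .pointer = args };
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };

	args[0].type = ACPI_TYPE_INTEGER;
	args[0].integer.value = function;
	if (params) {
		args[1].type = ACPI_TYPE_BUFFER;
		args[1].buffer.length = params->length;
		args[1].buffer.pointer = params->pointer;
	} else {
		/* dummy second argument, as in the ATIF/ATCS helpers */
		args[1].type = ACPI_TYPE_INTEGER;
		args[1].integer.value = 0;
	}

	if (ACPI_FAILURE(acpi_evaluate_object(handle, method, &arg_list, &out))) {
		kfree(out.pointer);
		return NULL;
	}
	return out.pointer;	/* caller must kfree() the result */
}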
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
+/* Notifications mask */
+#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+/* supported functions vector */
+#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
+#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
+#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
+#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
+#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
+#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
+#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
+#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
+#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
+#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE  - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE  - panel expansion mode
+ * BYTE  - thermal state: target gfx controller
+ * BYTE  - thermal state: state id (0: exit state, non-0: state)
+ * BYTE  - forced power state: target gfx controller
+ * BYTE  - forced power state: state id
+ * BYTE  - system power source
+ * BYTE  - panel backlight level (0-255)
+ */
+/* pending sbios requests */
+#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
+/* panel expansion mode */
+#       define ATIF_PANEL_EXPANSION_DISABLE                        0
+#       define ATIF_PANEL_EXPANSION_FULL                           1
+#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+/* target gfx controller */
+#       define ATIF_TARGET_GFX_SINGLE                              0
+#       define ATIF_TARGET_GFX_PX_IGPU                             1
+#       define ATIF_TARGET_GFX_PX_DGPU                             2
+/* system power source */
+#       define ATIF_POWER_SOURCE_AC                                1
+#       define ATIF_POWER_SOURCE_DC                                2
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ * WORD  - connected displays
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ */
+#       define ATIF_LCD1                                           (1 << 0)
+#       define ATIF_CRT1                                           (1 << 1)
+#       define ATIF_TV                                             (1 << 2)
+#       define ATIF_DFP1                                           (1 << 3)
+#       define ATIF_CRT2                                           (1 << 4)
+#       define ATIF_LCD2                                           (1 << 5)
+#       define ATIF_DFP2                                           (1 << 7)
+#       define ATIF_CV                                             (1 << 8)
+#       define ATIF_DFP3                                           (1 << 9)
+#       define ATIF_DFP4                                           (1 << 10)
+#       define ATIF_DFP5                                           (1 << 11)
+#       define ATIF_DFP6                                           (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE                                0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume; for general lid
+ * status, use the kernel-provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ */
+#       define ATIF_TV_STD_NTSC                                    0
+#       define ATIF_TV_STD_PAL                                     1
+#       define ATIF_TV_STD_PALM                                    2
+#       define ATIF_TV_STD_PAL60                                   3
+#       define ATIF_TV_STD_NTSCJ                                   4
+#       define ATIF_TV_STD_PALCN                                   5
+#       define ATIF_TV_STD_PALN                                    6
+#       define ATIF_TV_STD_SCART_RGB                               9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * DWORD - flags         \
+ * WORD  - bus number     } repeated structure
+ * WORD  - device number /
+ */
+/* flags */
+#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
+#       define ATIF_XGP_PORT                                       (1 << 1)
+#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
+#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATPX_GET_PX_PARAMETERS_SUPPORTED                    (1 << 0)
+#       define ATPX_POWER_CONTROL_SUPPORTED                        (1 << 1)
+#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED                  (1 << 2)
+#       define ATPX_I2C_MUX_CONTROL_SUPPORTED                      (1 << 3)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED   (1 << 5)
+#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED       (1 << 7)
+#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED          (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS                            0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 0)
+#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 1)
+#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 2)
+#       define ATPX_CRT1_RGB_SIGNAL_MUXED                          (1 << 3)
+#       define ATPX_TV_SIGNAL_MUXED                                (1 << 4)
+#       define ATPX_DFP_SIGNAL_MUXED                               (1 << 5)
+#       define ATPX_SEPARATE_MUX_FOR_I2C                           (1 << 6)
+#       define ATPX_DYNAMIC_PX_SUPPORTED                           (1 << 7)
+#       define ATPX_ACF_NOT_SUPPORTED                              (1 << 8)
+#       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
+#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
+#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL                                0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
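The ATPX input buffers documented in this block are plain packed structures, the same pattern used for the ATIF/ATCS structs in amdgpu_acpi.c; the dGPU power-control handling itself lives behind amdgpu_register_atpx_handler() rather than in this header. A hedged sketch of building the ARG1 buffer for ATPX_FUNCTION_POWER_CONTROL; the struct and helper names are illustrative.

struct example_atpx_power_control_input {
	u16 size;		/* structure size in bytes (includes size field) */
	u8 dgpu_state;		/* 0: power off, 1: power on */
} __packed;

static void example_fill_power_control(struct example_atpx_power_control_input *in,
				       bool power_on)
{
	in->size = sizeof(*in);
	in->dgpu_state = power_on ? 1 : 0;
}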
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL                          0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#       define ATPX_INTEGRATED_GPU                                 0
+#       define ATPX_DISCRETE_GPU                                   1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL                              0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION    0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION      0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING               0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of display connectors
+ * WORD  - connector structure size in bytes (excludes connector size field)
+ * BYTE  - flags                                                     \
+ * BYTE  - ATIF display vector bit position                           } repeated
+ * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD  - connector ACPI id                                         /
+ */
+/* flags */
+#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE  (1 << 0)
+#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 1)
+#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS                  0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of HPD/DDC ports
+ * WORD  - port structure size in bytes (excludes port size field)
+ * BYTE  - ATIF display vector bit position \
+ * BYTE  - hpd id                            } repeated structure
+ * BYTE  - ddc id                           /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+#       define ATPX_HPD_NONE                                       0
+#       define ATPX_HPD1                                           1
+#       define ATPX_HPD2                                           2
+#       define ATPX_HPD3                                           3
+#       define ATPX_HPD4                                           4
+#       define ATPX_HPD5                                           5
+#       define ATPX_HPD6                                           6
+/* ddc id */
+#       define ATPX_DDC_NONE                                       0
+#       define ATPX_DDC1                                           1
+#       define ATPX_DDC2                                           2
+#       define ATPX_DDC3                                           3
+#       define ATPX_DDC4                                           4
+#       define ATPX_DDC5                                           5
+#       define ATPX_DDC6                                           6
+#       define ATPX_DDC7                                           7
+#       define ATPX_DDC8                                           8
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATCS_GET_EXTERNAL_STATE_SUPPORTED                   (1 << 0)
+#       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED             (1 << 1)
+#       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED       (1 << 2)
+#       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED                   (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE                           0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+#       define ATCS_DOCKED                                         (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST                     0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD  - valid flags mask
+ * WORD  - flags
+ * BYTE  - request type
+ * BYTE  - performance request
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - return value
+ */
+/* flags */
+#       define ATCS_ADVERTISE_CAPS                                 (1 << 0)
+#       define ATCS_WAIT_FOR_COMPLETION                            (1 << 1)
+/* request type */
+#       define ATCS_PCIE_LINK_SPEED                                1
+/* performance request */
+#       define ATCS_REMOVE                                         0
+#       define ATCS_FORCE_LOW_POWER                                1
+#       define ATCS_PERF_LEVEL_1                                   2 /* PCIE Gen 1 */
+#       define ATCS_PERF_LEVEL_2                                   3 /* PCIE Gen 2 */
+#       define ATCS_PERF_LEVEL_3                                   4 /* PCIE Gen 3 */
+/* return value */
+#       define ATCS_REQUEST_REFUSED                                1
+#       define ATCS_REQUEST_COMPLETE                               2
+#       define ATCS_REQUEST_IN_PROGRESS                            3
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION               0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH                           0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE  - number of active lanes
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - number of active lanes
+ */
+
+#endif
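
Editorial aside, not part of the patch: the ARG1 layouts documented above translate directly into small packed buffers on the caller side. The sketch below is illustrative only and assumes kernel types; the struct and helper names are hypothetical, while the driver's actual marshalling for these methods lives in amdgpu_atpx_handler.c and amdgpu_acpi.c.

/* Hypothetical sketch of the ATPX_FUNCTION_POWER_CONTROL ARG1 layout
 * described above: WORD size (includes the size field), BYTE dGPU state. */
struct example_atpx_power_control_input {
	u16 size;		/* structure size in bytes, includes this field */
	u8 dgpu_state;		/* 0: power off, 1: power on */
} __packed;

static void example_fill_atpx_power_on(struct example_atpx_power_control_input *in)
{
	in->size = sizeof(*in);	/* 3; ATPX expects little-endian data */
	in->dgpu_state = 1;	/* request dGPU power on */
}

/* Hypothetical helper packing the ATCS client id documented above:
 * bits 2-0 = PCI function, bits 7-3 = device, bits 15-8 = bus. */
static u16 example_atcs_client_id(u8 bus, u8 dev, u8 fn)
{
	return (fn & 0x7) | ((dev & 0x1f) << 3) | ((u16)bus << 8);
}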

+ 105 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c

@@ -0,0 +1,105 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include <linux/hdmi.h>
+#include <linux/gcd.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+static const struct amdgpu_afmt_acr amdgpu_afmt_predefined_acr[] = {
+    /*	     32kHz	  44.1kHz	48kHz    */
+    /* Clock      N     CTS      N     CTS      N     CTS */
+    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25.20/1.001 MHz */
+    {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
+    {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
+    {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
+    {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
+    {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
+    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
+    {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
+    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
+    { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
+};
+
+
+/*
+ * calculate CTS and N values if they are not found in the table
+ */
+static void amdgpu_afmt_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
+{
+	int n, cts;
+	unsigned long div, mul;
+
+	/* Safe, but overly large values */
+	n = 128 * freq;
+	cts = clock * 1000;
+
+	/* Smallest valid fraction */
+	div = gcd(n, cts);
+
+	n /= div;
+	cts /= div;
+
+	/*
+	 * The optimal N is 128*freq/1000. Calculate the closest larger
+	 * value that doesn't truncate any bits.
+	 */
+	mul = ((128*freq/1000) + (n-1))/n;
+
+	n *= mul;
+	cts *= mul;
+
+	/* Check that we are in spec (not always possible) */
+	if (n < (128*freq/1500))
+		printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+	if (n > (128*freq/300))
+		printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+	*N = n;
+	*CTS = cts;
+
+	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+		  *N, *CTS, freq);
+}
+
+struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
+{
+	struct amdgpu_afmt_acr res;
+	u8 i;
+
+	/* Precalculated values for common clocks */
+	for (i = 0; i < ARRAY_SIZE(amdgpu_afmt_predefined_acr); i++) {
+		if (amdgpu_afmt_predefined_acr[i].clock == clock)
+			return amdgpu_afmt_predefined_acr[i];
+	}
+
+	/* And odd clocks get manually calculated */
+	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+
+	return res;
+}
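
Editorial note, not part of the patch: the table rows and the fallback calculation above both satisfy the usual HDMI ACR relation, in which the sink regenerates the audio sample rate as fs = f_TMDS * N / (128 * CTS). A minimal sketch checking one precomputed row follows (assuming div_u64 from linux/math64.h; the function name is hypothetical):

/* Check the 148.50 MHz / 48 kHz row: N = 6144, CTS = 148500. */
static void example_acr_sanity_check(void)
{
	u64 tmds_hz = 148500000ULL;		/* 148.50 MHz TMDS clock */
	u32 n = 6144, cts = 148500;		/* 48 kHz table entry */
	u64 fs = div_u64(tmds_hz * n, 128 * cts);	/* from linux/math64.h */

	WARN_ON(fs != 48000);	/* 148500000 * 6144 / (128 * 148500) == 48000 */
}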

+ 1598 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c

@@ -0,0 +1,1598 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_i2c.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "atombios_encoders.h"
+#include "bif/bif_4_1_d.h"
+
+static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
+					  ATOM_GPIO_I2C_ASSIGMENT *gpio,
+					  u8 index)
+{
+
+}
+
+static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+	struct amdgpu_i2c_bus_rec i2c;
+
+	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
+
+	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex);
+	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex);
+	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex);
+	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex);
+	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex);
+	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex);
+	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex);
+	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex);
+	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+		i2c.hw_capable = true;
+	else
+		i2c.hw_capable = false;
+
+	if (gpio->sucI2cId.ucAccess == 0xa0)
+		i2c.mm_i2c = true;
+	else
+		i2c.mm_i2c = false;
+
+	i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+	if (i2c.mask_clk_reg)
+		i2c.valid = true;
+	else
+		i2c.valid = false;
+
+	return i2c;
+}
+
+struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
+							  uint8_t id)
+{
+	struct atom_context *ctx = adev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct amdgpu_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+
+	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
+	i2c.valid = false;
+
+	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		gpio = &i2c_info->asGPIO_Info[0];
+		for (i = 0; i < num_indices; i++) {
+
+			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
+
+			if (gpio->sucI2cId.ucAccess == id) {
+				i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
+				break;
+			}
+			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+		}
+	}
+
+	return i2c;
+}
+
+void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
+{
+	struct atom_context *ctx = adev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct amdgpu_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+	char stmp[32];
+
+	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		gpio = &i2c_info->asGPIO_Info[0];
+		for (i = 0; i < num_indices; i++) {
+			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
+
+			i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
+
+			if (i2c.valid) {
+				sprintf(stmp, "0x%x", i2c.i2c_id);
+				adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+			}
+			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+		}
+	}
+}
+
+struct amdgpu_gpio_rec
+amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
+			    u8 id)
+{
+	struct atom_context *ctx = adev->mode_info.atom_context;
+	struct amdgpu_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
+	gpio.valid = false;
+
+	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+		pin = gpio_info->asGPIO_Pin;
+		for (i = 0; i < num_indices; i++) {
+			if (id == pin->ucGPIO_ID) {
+				gpio.id = pin->ucGPIO_ID;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
+				gpio.shift = pin->ucGpioPinBitShift;
+				gpio.mask = (1 << pin->ucGpioPinBitShift);
+				gpio.valid = true;
+				break;
+			}
+			pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
+				((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
+		}
+	}
+
+	return gpio;
+}
+
+static struct amdgpu_hpd
+amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
+				       struct amdgpu_gpio_rec *gpio)
+{
+	struct amdgpu_hpd hpd;
+	u32 reg;
+
+	memset(&hpd, 0, sizeof(struct amdgpu_hpd));
+
+	reg = amdgpu_display_hpd_get_gpio_reg(adev);
+
+	hpd.gpio = *gpio;
+	if (gpio->reg == reg) {
+		switch (gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = AMDGPU_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = AMDGPU_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = AMDGPU_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = AMDGPU_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = AMDGPU_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = AMDGPU_HPD_6;
+			break;
+		default:
+			hpd.hpd = AMDGPU_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = AMDGPU_HPD_NONE;
+	return hpd;
+}
+
+static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
+					 uint32_t supported_device,
+					 int *connector_type,
+					 struct amdgpu_i2c_bus_rec *i2c_bus,
+					 uint16_t *line_mux,
+					 struct amdgpu_hpd *hpd)
+{
+	return true;
+}
+
+static const int object_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DisplayPort,
+	DRM_MODE_CONNECTOR_eDP,
+	DRM_MODE_CONNECTOR_Unknown
+};
+
+bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 size, data_offset;
+	u8 frev, crev;
+	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
+	ATOM_OBJECT_TABLE *router_obj;
+	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+	ATOM_OBJECT_HEADER *obj_header;
+	int i, j, k, path_size, device_support;
+	int connector_type;
+	u16 conn_id, connector_object_id;
+	struct amdgpu_i2c_bus_rec ddc_bus;
+	struct amdgpu_router router;
+	struct amdgpu_gpio_rec gpio;
+	struct amdgpu_hpd hpd;
+
+	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
+
+	if (crev < 2)
+		return false;
+
+	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
+	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
+	router_obj = (ATOM_OBJECT_TABLE *)
+		(ctx->bios + data_offset +
+		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
+	device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
+	path_size = 0;
+	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+		ATOM_DISPLAY_OBJECT_PATH *path;
+		addr += path_size;
+		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+		path_size += le16_to_cpu(path->usSize);
+
+		if (device_support & le16_to_cpu(path->usDeviceTag)) {
+			uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+			con_obj_id =
+			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+			    >> OBJECT_ID_SHIFT;
+			con_obj_num =
+			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+			    >> ENUM_ID_SHIFT;
+			con_obj_type =
+			    (le16_to_cpu(path->usConnObjectId) &
+			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+			connector_type =
+				object_connector_convert[con_obj_id];
+			connector_object_id = con_obj_id;
+
+			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+				continue;
+
+			router.ddc_valid = false;
+			router.cd_valid = false;
+			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
+
+				grph_obj_id =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+				grph_obj_num =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+				grph_obj_type =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+							ATOM_ENCODER_CAP_RECORD *cap_record;
+							u16 caps = 0;
+
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_ENCODER_CAP_RECORD_TYPE:
+									cap_record =(ATOM_ENCODER_CAP_RECORD *)
+										record;
+									caps = le16_to_cpu(cap_record->usEncoderCap);
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+							amdgpu_display_add_encoder(adev, encoder_obj,
+										    le16_to_cpu(path->usDeviceTag),
+										    caps);
+						}
+					}
+				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+							ATOM_I2C_RECORD *i2c_record;
+							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
+							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+							u8 *num_dst_objs = (u8 *)
+								((u8 *)router_src_dst_table + 1 +
+								 (router_src_dst_table->ucNumberOfSrc * 2));
+							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
+							int enum_id;
+
+							router.router_id = router_obj_id;
+							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
+								if (le16_to_cpu(path->usConnObjectId) ==
+								    le16_to_cpu(dst_objs[enum_id]))
+									break;
+							}
+
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_I2C_RECORD_TYPE:
+									i2c_record =
+										(ATOM_I2C_RECORD *)
+										record;
+									i2c_config =
+										(ATOM_I2C_ID_CONFIG_ACCESS *)
+										&i2c_record->sucI2cId;
+									router.i2c_info =
+										amdgpu_atombios_lookup_i2c_gpio(adev,
+												       i2c_config->
+												       ucAccess);
+									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+									break;
+								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+										record;
+									router.ddc_valid = true;
+									router.ddc_mux_type = ddc_path->ucMuxType;
+									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+									break;
+								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+										record;
+									router.cd_valid = true;
+									router.cd_mux_type = cd_path->ucMuxType;
+									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+									router.cd_mux_state = cd_path->ucMuxState[enum_id];
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+						}
+					}
+				}
+			}
+
+			/* look up gpio for ddc, hpd */
+			ddc_bus.valid = false;
+			hpd.hpd = AMDGPU_HPD_NONE;
+			if ((le16_to_cpu(path->usDeviceTag) &
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+					if (le16_to_cpu(path->usConnObjectId) ==
+					    le16_to_cpu(con_obj->asObjects[j].
+							usObjectID)) {
+						ATOM_COMMON_RECORD_HEADER
+						    *record =
+						    (ATOM_COMMON_RECORD_HEADER
+						     *)
+						    (ctx->bios + data_offset +
+						     le16_to_cpu(con_obj->
+								 asObjects[j].
+								 usRecordOffset));
+						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+
+						while (record->ucRecordSize > 0 &&
+						       record->ucRecordType > 0 &&
+						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+							switch (record->ucRecordType) {
+							case ATOM_I2C_RECORD_TYPE:
+								i2c_record =
+								    (ATOM_I2C_RECORD *)
+									record;
+								i2c_config =
+									(ATOM_I2C_ID_CONFIG_ACCESS *)
+									&i2c_record->sucI2cId;
+								ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev,
+												 i2c_config->
+												 ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record =
+									(ATOM_HPD_INT_RECORD *)
+									record;
+								gpio = amdgpu_atombios_lookup_gpio(adev,
+											  hpd_record->ucHPDIntGPIOID);
+								hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
+								break;
+							}
+							record =
+							    (ATOM_COMMON_RECORD_HEADER
+							     *) ((char *)record
+								 +
+								 record->
+								 ucRecordSize);
+						}
+						break;
+					}
+				}
+			}
+
+			/* needed for aux chan transactions */
+			ddc_bus.hpd = hpd.hpd;
+
+			conn_id = le16_to_cpu(path->usConnObjectId);
+
+			if (!amdgpu_atombios_apply_quirks
+			    (adev, le16_to_cpu(path->usDeviceTag), &connector_type,
+			     &ddc_bus, &conn_id, &hpd))
+				continue;
+
+			amdgpu_display_add_connector(adev,
+						      conn_id,
+						      le16_to_cpu(path->usDeviceTag),
+						      connector_type, &ddc_bus,
+						      connector_object_id,
+						      &hpd,
+						      &router);
+
+		}
+	}
+
+	amdgpu_link_encoder_connector(adev->ddev);
+
+	return true;
+}
+
+union firmware_info {
+	ATOM_FIRMWARE_INFO info;
+	ATOM_FIRMWARE_INFO_V1_2 info_12;
+	ATOM_FIRMWARE_INFO_V1_3 info_13;
+	ATOM_FIRMWARE_INFO_V1_4 info_14;
+	ATOM_FIRMWARE_INFO_V2_1 info_21;
+	ATOM_FIRMWARE_INFO_V2_2 info_22;
+};
+
+int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	uint8_t frev, crev;
+	uint16_t data_offset;
+	int ret = -EINVAL;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		int i;
+		struct amdgpu_pll *ppll = &adev->clock.ppll[0];
+		struct amdgpu_pll *spll = &adev->clock.spll;
+		struct amdgpu_pll *mpll = &adev->clock.mpll;
+		union firmware_info *firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		/* pixel clocks */
+		ppll->reference_freq =
+		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		ppll->reference_div = 0;
+
+		if (crev < 2)
+			ppll->pll_out_min =
+				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+		else
+			ppll->pll_out_min =
+				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+		ppll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+		if (crev >= 4) {
+			ppll->lcd_pll_out_min =
+				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+			if (ppll->lcd_pll_out_min == 0)
+				ppll->lcd_pll_out_min = ppll->pll_out_min;
+			ppll->lcd_pll_out_max =
+				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+			if (ppll->lcd_pll_out_max == 0)
+				ppll->lcd_pll_out_max = ppll->pll_out_max;
+		} else {
+			ppll->lcd_pll_out_min = ppll->pll_out_min;
+			ppll->lcd_pll_out_max = ppll->pll_out_max;
+		}
+
+		if (ppll->pll_out_min == 0)
+			ppll->pll_out_min = 64800;
+
+		ppll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+		ppll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
+		ppll->min_post_div = 2;
+		ppll->max_post_div = 0x7f;
+		ppll->min_frac_feedback_div = 0;
+		ppll->max_frac_feedback_div = 9;
+		ppll->min_ref_div = 2;
+		ppll->max_ref_div = 0x3ff;
+		ppll->min_feedback_div = 4;
+		ppll->max_feedback_div = 0xfff;
+		ppll->best_vco = 0;
+
+		for (i = 1; i < AMDGPU_MAX_PPLL; i++)
+			adev->clock.ppll[i] = *ppll;
+
+		/* system clock */
+		spll->reference_freq =
+			le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+		spll->reference_div = 0;
+
+		spll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+		spll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+		/* ??? */
+		if (spll->pll_out_min == 0)
+			spll->pll_out_min = 64800;
+
+		spll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+		spll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+		spll->min_post_div = 1;
+		spll->max_post_div = 1;
+		spll->min_ref_div = 2;
+		spll->max_ref_div = 0xff;
+		spll->min_feedback_div = 4;
+		spll->max_feedback_div = 0xff;
+		spll->best_vco = 0;
+
+		/* memory clock */
+		mpll->reference_freq =
+			le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+		mpll->reference_div = 0;
+
+		mpll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+		mpll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+		/* ??? */
+		if (mpll->pll_out_min == 0)
+			mpll->pll_out_min = 64800;
+
+		mpll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+		mpll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+		adev->clock.default_sclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+		adev->clock.default_mclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+		mpll->min_post_div = 1;
+		mpll->max_post_div = 1;
+		mpll->min_ref_div = 2;
+		mpll->max_ref_div = 0xff;
+		mpll->min_feedback_div = 4;
+		mpll->max_feedback_div = 0xff;
+		mpll->best_vco = 0;
+
+		/* disp clock */
+		adev->clock.default_dispclk =
+			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+		if (adev->clock.default_dispclk == 0)
+			adev->clock.default_dispclk = 54000; /* 540 MHz */
+		adev->clock.dp_extclk =
+			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+		adev->clock.current_dispclk = adev->clock.default_dispclk;
+
+		adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+		if (adev->clock.max_pixel_clock == 0)
+			adev->clock.max_pixel_clock = 40000;
+
+		/* not technically a clock, but... */
+		adev->mode_info.firmware_flags =
+			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
+		ret = 0;
+	}
+
+	adev->pm.current_sclk = adev->clock.default_sclk;
+	adev->pm.current_mclk = adev->clock.default_mclk;
+
+	return ret;
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
+};
+
+static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
+						 struct amdgpu_atom_ss *ss,
+						 int id)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 percentage = 0, rate = 0;
+
+	/* get any igp specific overrides */
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 6:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 7:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 8:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 9:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+		if (percentage)
+			ss->percentage = percentage;
+		if (rate)
+			ss->rate = rate;
+	}
+}
+
+union asic_ss_info {
+	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
+union asic_ss_assignment {
+	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
+	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
+	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
+};
+
+bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
+				      struct amdgpu_atom_ss *ss,
+				      int id, u32 clock)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+	uint16_t data_offset, size;
+	union asic_ss_info *ss_info;
+	union asic_ss_assignment *ss_assign;
+	uint8_t frev, crev;
+	int i, num_indices;
+
+	if (id == ASIC_INTERNAL_MEMORY_SS) {
+		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
+			return false;
+	}
+	if (id == ASIC_INTERNAL_ENGINE_SS) {
+		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
+			return false;
+	}
+
+	memset(ss, 0, sizeof(struct amdgpu_atom_ss));
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+
+		ss_info =
+			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_assign->v1.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
+					ss->percentage_divider = 100;
+					return true;
+				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
+			}
+			break;
+		case 2:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_assign->v2.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
+					ss->percentage_divider = 100;
+					if ((crev == 2) &&
+					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
+					     (id == ASIC_INTERNAL_MEMORY_SS)))
+						ss->rate /= 100;
+					return true;
+				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
+			}
+			break;
+		case 3:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_assign->v3.ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
+					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
+					if (ss_assign->v3.ucSpreadSpectrumMode &
+					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
+						ss->percentage_divider = 1000;
+					else
+						ss->percentage_divider = 100;
+					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
+					    (id == ASIC_INTERNAL_MEMORY_SS))
+						ss->rate /= 100;
+					if (adev->flags & AMDGPU_IS_APU)
+						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
+					return true;
+				}
+				ss_assign = (union asic_ss_assignment *)
+					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+			break;
+		}
+
+	}
+	return false;
+}
+
+union get_clock_dividers {
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
+	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
+};
+
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+				       u8 clock_type,
+				       u32 clock,
+				       bool strobe_mode,
+				       struct atom_clock_dividers *dividers)
+{
+	union get_clock_dividers args;
+	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+	u8 frev, crev;
+
+	memset(&args, 0, sizeof(args));
+	memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 4:
+		/* fusion */
+		args.v4.ulClock = cpu_to_le32(clock);	/* 10 kHz */
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
+		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+		break;
+	case 6:
+		/* CI */
+		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
+		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
+		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 kHz */
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
+		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
+		dividers->ref_div = args.v6_out.ucPllRefDiv;
+		dividers->post_div = args.v6_out.ucPllPostDiv;
+		dividers->flags = args.v6_out.ucPllCntlFlag;
+		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
+		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
+					    u32 clock,
+					    bool strobe_mode,
+					    struct atom_mpll_param *mpll_param)
+{
+	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
+	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
+	u8 frev, crev;
+
+	memset(&args, 0, sizeof(args));
+	memset(mpll_param, 0, sizeof(struct atom_mpll_param));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (frev) {
+	case 2:
+		switch (crev) {
+		case 1:
+			/* SI */
+			args.ulClock = cpu_to_le32(clock);	/* 10 kHz */
+			args.ucInputFlag = 0;
+			if (strobe_mode)
+				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
+			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
+			mpll_param->post_div = args.ucPostDiv;
+			mpll_param->dll_speed = args.ucDllSpeed;
+			mpll_param->bwcntl = args.ucBWCntl;
+			mpll_param->vco_mode =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
+			mpll_param->yclk_sel =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
+			mpll_param->qdr =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
+			mpll_param->half_rate =
+				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
+{
+	GET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnEngineClock);
+}
+
+uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
+{
+	GET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnMemoryClock);
+}
+
+void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
+				      uint32_t eng_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 kHz */
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
+				      uint32_t mem_clock)
+{
+	SET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+	if (adev->flags & AMDGPU_IS_APU)
+		return;
+
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 kHz */
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+					     u32 eng_clock, u32 mem_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
+	u32 tmp;
+
+	memset(&args, 0, sizeof(args));
+
+	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
+	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
+
+	args.ulTargetEngineClock = cpu_to_le32(tmp);
+	if (mem_clock)
+		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union set_voltage {
+	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+	struct _SET_VOLTAGE_PARAMETERS v1;
+	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
+};
+
+void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
+				 u16 voltage_level,
+				 u8 voltage_type)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev, volt_index = voltage_level;
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	/* 0xff01 is a flag rather than an actual voltage */
+	if (voltage_level == 0xff01)
+		return;
+
+	switch (crev) {
+	case 1:
+		args.v1.ucVoltageType = voltage_type;
+		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+		args.v1.ucVoltageIndex = volt_index;
+		break;
+	case 2:
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return;
+	}
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
+					      u16 *leakage_id)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 3:
+	case 4:
+		args.v3.ucVoltageType = 0;
+		args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+		args.v3.usVoltageLevel = 0;
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
+							     u16 *vddc, u16 *vddci,
+							     u16 virtual_voltage_id,
+							     u16 vbios_voltage_id)
+{
+	int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	int i, j;
+	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+
+	*vddc = 0;
+	*vddci = 0;
+
+	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				    &frev, &crev, &data_offset))
+		return -EINVAL;
+
+	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+		(adev->mode_info.atom_context->bios + data_offset);
+
+	switch (frev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		switch (crev) {
+		case 1:
+			if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
+				return -EINVAL;
+			leakage_bin = (u16 *)
+				(adev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usLeakageBinArrayOffset));
+			vddc_id_buf = (u16 *)
+				(adev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
+			vddc_buf = (u16 *)
+				(adev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
+			vddci_id_buf = (u16 *)
+				(adev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
+			vddci_buf = (u16 *)
+				(adev->mode_info.atom_context->bios + data_offset +
+				 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
+
+			if (profile->ucElbVDDC_Num > 0) {
+				for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+					if (vddc_id_buf[i] == virtual_voltage_id) {
+						for (j = 0; j < profile->ucLeakageBinNum; j++) {
+							if (vbios_voltage_id <= leakage_bin[j]) {
+								*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+								break;
+							}
+						}
+						break;
+					}
+				}
+			}
+			if (profile->ucElbVDDCI_Num > 0) {
+				for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+					if (vddci_id_buf[i] == virtual_voltage_id) {
+						for (j = 0; j < profile->ucLeakageBinNum; j++) {
+							if (vbios_voltage_id <= leakage_bin[j]) {
+								*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
+								break;
+							}
+						}
+						break;
+					}
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+union get_voltage_info {
+	struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+	struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
+
+int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
+				    u16 virtual_voltage_id,
+				    u16 *voltage)
+{
+	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+	u32 entry_id;
+	u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+	union get_voltage_info args;
+
+	for (entry_id = 0; entry_id < count; entry_id++) {
+		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+		    virtual_voltage_id)
+			break;
+	}
+
+	if (entry_id >= count)
+		return -EINVAL;
+
+	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+	args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
+	args.in.ulSCLKFreq =
+		cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+	return 0;
+}
+
+union voltage_object_info {
+	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
+	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
+	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
+};
+
+union voltage_object {
+	struct _ATOM_VOLTAGE_OBJECT v1;
+	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
+	union _ATOM_VOLTAGE_OBJECT_V3 v3;
+};
+
+
+static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
+									u8 voltage_type, u8 voltage_mode)
+{
+	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
+	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
+	u8 *start = (u8 *)v3;
+
+	while (offset < size) {
+		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
+		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
+		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
+			return vo;
+		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
+	}
+	return NULL;
+}
+
+bool
+amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
+				u8 voltage_type, u8 voltage_mode)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	union voltage_object_info *voltage_info;
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(adev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 3:
+			switch (crev) {
+			case 1:
+				if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+								  voltage_type, voltage_mode))
+					return true;
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return false;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return false;
+		}
+
+	}
+	return false;
+}
+
+int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
+				      u8 voltage_type, u8 voltage_mode,
+				      struct atom_voltage_table *voltage_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+	u8 frev, crev;
+	u16 data_offset, size;
+	int i;
+	union voltage_object_info *voltage_info;
+	union voltage_object *voltage_object = NULL;
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		voltage_info = (union voltage_object_info *)
+			(adev->mode_info.atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 3:
+			switch (crev) {
+			case 1:
+				voltage_object = (union voltage_object *)
+					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+								      voltage_type, voltage_mode);
+				if (voltage_object) {
+					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
+						&voltage_object->v3.asGpioVoltageObj;
+					VOLTAGE_LUT_ENTRY_V2 *lut;
+					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
+						return -EINVAL;
+					lut = &gpio->asVolGpioLut[0];
+					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
+						voltage_table->entries[i].value =
+							le16_to_cpu(lut->usVoltageValue);
+						voltage_table->entries[i].smio_low =
+							le32_to_cpu(lut->ulVoltageId);
+						lut = (VOLTAGE_LUT_ENTRY_V2 *)
+							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
+					}
+					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
+					voltage_table->count = gpio->ucGpioEntryNum;
+					voltage_table->phase_delay = gpio->ucPhaseDelay;
+					return 0;
+				}
+				break;
+			default:
+				DRM_ERROR("unknown voltage object table\n");
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("unknown voltage object table\n");
+			return -EINVAL;
+		}
+	}
+	return -EINVAL;
+}
+
+union vram_info {
+	struct _ATOM_VRAM_INFO_V3 v1_3;
+	struct _ATOM_VRAM_INFO_V4 v1_4;
+	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
+};
+
+#define MEM_ID_MASK           0xff000000
+#define MEM_ID_SHIFT          24
+#define CLOCK_RANGE_MASK      0x00ffffff
+#define CLOCK_RANGE_SHIFT     0
+#define LOW_NIBBLE_MASK       0xf
+#define DATA_EQU_PREV         0
+#define DATA_FROM_TABLE       4
+
+int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
+				      u8 module_index,
+				      struct atom_mc_reg_table *reg_table)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
+	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
+	u32 i = 0, j;
+	u16 data_offset, size;
+	union vram_info *vram_info;
+
+	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		vram_info = (union vram_info *)
+			(adev->mode_info.atom_context->bios + data_offset);
+		switch (frev) {
+		case 1:
+			DRM_ERROR("old table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		case 2:
+			switch (crev) {
+			case 1:
+				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
+					ATOM_INIT_REG_BLOCK *reg_block =
+						(ATOM_INIT_REG_BLOCK *)
+						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
+					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
+						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
+						((u8 *)reg_block + (2 * sizeof(u16)) +
+						 le16_to_cpu(reg_block->usRegIndexTblSize));
+					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
+					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
+							   sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
+					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
+						return -EINVAL;
+					while (i < num_entries) {
+						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
+							break;
+						reg_table->mc_reg_address[i].s1 =
+							(u16)(le16_to_cpu(format->usRegIndex));
+						reg_table->mc_reg_address[i].pre_reg_data =
+							(u8)(format->ucPreRegDataLength);
+						i++;
+						format = (ATOM_INIT_REG_INDEX_FORMAT *)
+							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
+					}
+					reg_table->last = i;
+					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
+					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
+						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
+								>> MEM_ID_SHIFT);
+						if (module_index == t_mem_id) {
+							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
+								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
+								      >> CLOCK_RANGE_SHIFT);
+							for (i = 0, j = 1; i < reg_table->last; i++) {
+								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
+									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+										(u32)le32_to_cpu(*((u32 *)reg_data + j));
+									j++;
+								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+								}
+							}
+							num_ranges++;
+						}
+						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
+							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
+					}
+					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
+						return -EINVAL;
+					reg_table->num_entries = num_ranges;
+				} else
+					return -EINVAL;
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	return -EINVAL;
+}
+
+void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
+{
+	uint32_t bios_6_scratch;
+
+	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+
+	if (lock) {
+		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+	} else {
+		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch |= ATOM_S6_ACC_MODE;
+	}
+
+	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+}
+
+void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
+{
+	uint32_t bios_2_scratch, bios_6_scratch;
+
+	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
+	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+
+	/* let the bios control the backlight */
+	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
+	/* clear the vbios dpms state */
+	bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
+	WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
+	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+}
+
+void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
+		adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
+}
+
+void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
+		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
+}
+
+/* Atom needs data in little endian format, so swap as appropriate when
+ * copying data to or from atom. Note that atom operates on dword
+ * (32-bit) units.
+ */
+void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+	u32 *dst32, *src32;
+	int i;
+
+	memcpy(src_tmp, src, num_bytes);
+	src32 = (u32 *)src_tmp;
+	dst32 = (u32 *)dst_tmp;
+	if (to_le) {
+		for (i = 0; i < ((num_bytes + 3) / 4); i++)
+			dst32[i] = cpu_to_le32(src32[i]);
+		memcpy(dst, dst_tmp, num_bytes);
+	} else {
+		u8 dws = num_bytes & ~3;
+		for (i = 0; i < ((num_bytes + 3) / 4); i++)
+			dst32[i] = le32_to_cpu(src32[i]);
+		memcpy(dst, dst_tmp, dws);
+		if (num_bytes % 4) {
+			for (i = 0; i < (num_bytes % 4); i++)
+				dst[dws+i] = dst_tmp[dws+i];
+		}
+	}
+#else
+	memcpy(dst, src, num_bytes);
+#endif
+}
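
An illustrative aside, not part of the patch: the MC register table walker above splits each 32-bit data-block header with MEM_ID_MASK and CLOCK_RANGE_MASK to recover the memory-module id and the matching clock range. A minimal standalone sketch of that decode; the header value is made up and no assumption is made about the clock units the VBIOS encodes.

/* illustrative only -- mirrors the MEM_ID/CLOCK_RANGE split used above */
#include <stdint.h>
#include <stdio.h>

#define MEM_ID_MASK       0xff000000u
#define MEM_ID_SHIFT      24
#define CLOCK_RANGE_MASK  0x00ffffffu

int main(void)
{
	uint32_t header = 0x0200c350;	/* hypothetical: module id 2, range 50000 */
	unsigned int mem_id = (header & MEM_ID_MASK) >> MEM_ID_SHIFT;
	unsigned int mclk_max = header & CLOCK_RANGE_MASK;

	printf("module %u, mclk_max %u\n", mem_id, mclk_max);
	return 0;
}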

+ 206 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h

@@ -0,0 +1,206 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_ATOMBIOS_H__
+#define __AMDGPU_ATOMBIOS_H__
+
+struct atom_clock_dividers {
+	u32 post_div;
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 reserved : 6;
+			u32 whole_fb_div : 12;
+			u32 frac_fb_div : 14;
+#else
+			u32 frac_fb_div : 14;
+			u32 whole_fb_div : 12;
+			u32 reserved : 6;
+#endif
+		};
+		u32 fb_div;
+	};
+	u32 ref_div;
+	bool enable_post_div;
+	bool enable_dithen;
+	u32 vco_mode;
+	u32 real_clock;
+	/* added for CI */
+	u32 post_divider;
+	u32 flags;
+};
+
+struct atom_mpll_param {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 reserved : 8;
+			u32 clkfrac : 12;
+			u32 clkf : 12;
+#else
+			u32 clkf : 12;
+			u32 clkfrac : 12;
+			u32 reserved : 8;
+#endif
+		};
+		u32 fb_div;
+	};
+	u32 post_div;
+	u32 bwcntl;
+	u32 dll_speed;
+	u32 vco_mode;
+	u32 yclk_sel;
+	u32 qdr;
+	u32 half_rate;
+};
+
+#define MEM_TYPE_GDDR5  0x50
+#define MEM_TYPE_GDDR4  0x40
+#define MEM_TYPE_GDDR3  0x30
+#define MEM_TYPE_DDR2   0x20
+#define MEM_TYPE_GDDR1  0x10
+#define MEM_TYPE_DDR3   0xb0
+#define MEM_TYPE_MASK   0xf0
+
+struct atom_memory_info {
+	u8 mem_vendor;
+	u8 mem_type;
+};
+
+#define MAX_AC_TIMING_ENTRIES 16
+
+struct atom_memory_clock_range_table
+{
+	u8 num_entries;
+	u8 rsv[3];
+	u32 mclk[MAX_AC_TIMING_ENTRIES];
+};
+
+#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
+#define VBIOS_MAX_AC_TIMING_ENTRIES 20
+
+struct atom_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct atom_mc_register_address {
+	u16 s1;
+	u8 pre_reg_data;
+};
+
+struct atom_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
+	struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
+};
+
+#define MAX_VOLTAGE_ENTRIES 32
+
+struct atom_voltage_table_entry
+{
+	u16 value;
+	u32 smio_low;
+};
+
+struct atom_voltage_table
+{
+	u32 count;
+	u32 mask_low;
+	u32 phase_delay;
+	struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
+};
+
+struct amdgpu_gpio_rec
+amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
+			    u8 id);
+
+struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
+							  uint8_t id);
+void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+
+bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
+
+int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
+
+bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
+				      struct amdgpu_atom_ss *ss,
+				      int id, u32 clock);
+
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+				       u8 clock_type,
+				       u32 clock,
+				       bool strobe_mode,
+				       struct atom_clock_dividers *dividers);
+
+int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
+					    u32 clock,
+					    bool strobe_mode,
+					    struct atom_mpll_param *mpll_param);
+
+uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev);
+uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev);
+void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
+				      uint32_t eng_clock);
+void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
+				      uint32_t mem_clock);
+void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
+				 u16 voltage_level,
+				 u8 voltage_type);
+
+void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+					     u32 eng_clock, u32 mem_clock);
+
+int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
+					      u16 *leakage_id);
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
+							     u16 *vddc, u16 *vddci,
+							     u16 virtual_voltage_id,
+							     u16 vbios_voltage_id);
+
+int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
+				    u16 virtual_voltage_id,
+				    u16 *voltage);
+
+bool
+amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
+				u8 voltage_type, u8 voltage_mode);
+
+int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
+				      u8 voltage_type, u8 voltage_mode,
+				      struct atom_voltage_table *voltage_table);
+
+int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
+				      u8 module_index,
+				      struct atom_mc_reg_table *reg_table);
+
+void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
+void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
+void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
+
+void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
+#endif
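
An illustrative aside, not part of the patch: the MEM_TYPE_* values above share a mask of 0xf0, which suggests the vram type is read by masking off the low nibble. The standalone sketch below only demonstrates that decode; the helper name and the sample byte are made up and not taken from driver code.

#include <stdio.h>

#define MEM_TYPE_GDDR5  0x50
#define MEM_TYPE_GDDR4  0x40
#define MEM_TYPE_GDDR3  0x30
#define MEM_TYPE_DDR2   0x20
#define MEM_TYPE_GDDR1  0x10
#define MEM_TYPE_DDR3   0xb0
#define MEM_TYPE_MASK   0xf0

static const char *mem_type_name(unsigned char mem_type)
{
	switch (mem_type & MEM_TYPE_MASK) {
	case MEM_TYPE_GDDR5: return "GDDR5";
	case MEM_TYPE_GDDR4: return "GDDR4";
	case MEM_TYPE_GDDR3: return "GDDR3";
	case MEM_TYPE_DDR2:  return "DDR2";
	case MEM_TYPE_GDDR1: return "GDDR1";
	case MEM_TYPE_DDR3:  return "DDR3";
	default:             return "unknown";
	}
}

int main(void)
{
	printf("%s\n", mem_type_name(0x5a));	/* low nibble ignored -> GDDR5 */
	return 0;
}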

+ 572 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c

@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for both Intel/ATI
+ */
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+
+#include "amdgpu_acpi.h"
+
+struct amdgpu_atpx_functions {
+	bool px_params;
+	bool power_cntl;
+	bool disp_mux_cntl;
+	bool i2c_mux_cntl;
+	bool switch_start;
+	bool switch_end;
+	bool disp_connectors_mapping;
+	bool disp_detection_ports;
+};
+
+struct amdgpu_atpx {
+	acpi_handle handle;
+	struct amdgpu_atpx_functions functions;
+};
+
+static struct amdgpu_atpx_priv {
+	bool atpx_detected;
+	/* handles for the device and the ATPX method */
+	acpi_handle dhandle;
+	acpi_handle other_handle;
+	struct amdgpu_atpx atpx;
+} amdgpu_atpx_priv;
+
+struct atpx_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atpx_px_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_flags;	/* which flags are valid */
+	u32 flags;		/* flags */
+} __packed;
+
+struct atpx_power_control {
+	u16 size;
+	u8 dgpu_state;
+} __packed;
+
+struct atpx_mux {
+	u16 size;
+	u16 mux;
+} __packed;
+
+bool amdgpu_has_atpx(void) {
+	return amdgpu_atpx_priv.atpx_detected;
+}
+
+/**
+ * amdgpu_atpx_call - call an ATPX method
+ *
+ * @handle: acpi handle
+ * @function: the ATPX function to execute
+ * @params: ATPX function params
+ *
+ * Executes the requested ATPX function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atpx_arg_elements[2];
+	struct acpi_object_list atpx_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atpx_arg.count = 2;
+	atpx_arg.pointer = &atpx_arg_elements[0];
+
+	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atpx_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atpx_arg_elements[1].buffer.length = params->length;
+		atpx_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atpx_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATPX is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		printk("failed to evaluate ATPX got %s\n",
+		       acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * amdgpu_atpx_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATPX
+ *
+ * Use the supported functions mask from ATPX function
+ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mask)
+{
+	f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
+	f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
+	f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
+	f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
+	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+	f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+}
+
+/**
+ * amdgpu_atpx_validate - validate ATPX functions
+ *
+ * @atpx: amdgpu atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+{
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	atpx->functions.power_cntl = true;
+
+	if (atpx->functions.px_params) {
+		union acpi_object *info;
+		struct atpx_px_params output;
+		size_t size;
+		u32 valid_bits;
+
+		info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+		if (!info)
+			return -EIO;
+
+		memset(&output, 0, sizeof(output));
+
+		size = *(u16 *) info->buffer.pointer;
+		if (size < 10) {
+			printk("ATPX buffer is too small: %zu\n", size);
+			kfree(info);
+			return -EINVAL;
+		}
+		size = min(sizeof(output), size);
+
+		memcpy(&output, info->buffer.pointer, size);
+
+		valid_bits = output.flags & output.valid_flags;
+		/* if separate mux flag is set, mux controls are required */
+		if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+			atpx->functions.i2c_mux_cntl = true;
+			atpx->functions.disp_mux_cntl = true;
+		}
+		/* if any outputs are muxed, mux controls are required */
+		if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+				  ATPX_TV_SIGNAL_MUXED |
+				  ATPX_DFP_SIGNAL_MUXED))
+			atpx->functions.disp_mux_cntl = true;
+
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_verify_interface - verify ATPX
+ *
+ * @atpx: amdgpu atpx struct
+ *
+ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
+ * to initialize ATPX and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
+{
+	union acpi_object *info;
+	struct atpx_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		printk("ATPX buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	printk("ATPX version %u, functions 0x%08x\n",
+	       output.version, output.function_bits);
+
+	amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * amdgpu_atpx_set_discrete_state - power up/down discrete GPU
+ *
+ * @atpx: atpx info struct
+ * @state: discrete GPU state (0 = power down, 1 = power up)
+ *
+ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
+ * power down/up the discrete GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_power_control input;
+
+	if (atpx->functions.power_cntl) {
+		input.size = 3;
+		input.dgpu_state = state;
+		params.length = input.size;
+		params.pointer = &input;
+		info = amdgpu_atpx_call(atpx->handle,
+					ATPX_FUNCTION_POWER_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_switch_disp_mux - switch display mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
+ * switch the display mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_switch_disp_mux(struct amdgpu_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.disp_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = amdgpu_atpx_call(atpx->handle,
+					ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_switch_i2c_mux - switch i2c/hpd mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
+ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_switch_i2c_mux(struct amdgpu_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.i2c_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = amdgpu_atpx_call(atpx->handle,
+					ATPX_FUNCTION_I2C_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_switch_start - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has begun (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_switch_start(struct amdgpu_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_start) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = amdgpu_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_switch_end - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has ended (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_switch_end(struct amdgpu_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_end) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = amdgpu_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_switchto - switch to the requested GPU
+ *
+ * @id: GPU to switch to
+ *
+ * Execute the necessary ATPX functions to switch between the discrete GPU and
+ * integrated GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+	u16 gpu_id;
+
+	if (id == VGA_SWITCHEROO_IGD)
+		gpu_id = ATPX_INTEGRATED_GPU;
+	else
+		gpu_id = ATPX_DISCRETE_GPU;
+
+	amdgpu_atpx_switch_start(&amdgpu_atpx_priv.atpx, gpu_id);
+	amdgpu_atpx_switch_disp_mux(&amdgpu_atpx_priv.atpx, gpu_id);
+	amdgpu_atpx_switch_i2c_mux(&amdgpu_atpx_priv.atpx, gpu_id);
+	amdgpu_atpx_switch_end(&amdgpu_atpx_priv.atpx, gpu_id);
+
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_power_state - power down/up the requested GPU
+ *
+ * @id: GPU to power down/up
+ * @state: requested power state (0 = off, 1 = on)
+ *
+ * Execute the necessary ATPX function to power down/up the discrete GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
+				   enum vga_switcheroo_state state)
+{
+	/* on the W500, ACPI can't change the intel gpu state */
+	if (id == VGA_SWITCHEROO_IGD)
+		return 0;
+
+	amdgpu_atpx_set_discrete_state(&amdgpu_atpx_priv.atpx, state);
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_pci_probe_handle - look up the ATPX handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATPX handles (all asics).
+ * Returns true if the handles are found, false if not.
+ */
+static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+	acpi_handle dhandle, atpx_handle;
+	acpi_status status;
+
+	dhandle = ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+	if (ACPI_FAILURE(status)) {
+		amdgpu_atpx_priv.other_handle = dhandle;
+		return false;
+	}
+	amdgpu_atpx_priv.dhandle = dhandle;
+	amdgpu_atpx_priv.atpx.handle = atpx_handle;
+	return true;
+}
+
+/**
+ * amdgpu_atpx_init - verify the ATPX interface
+ *
+ * Verify the ATPX interface (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atpx_init(void)
+{
+	int r;
+
+	/* set up the ATPX handle */
+	r = amdgpu_atpx_verify_interface(&amdgpu_atpx_priv.atpx);
+	if (r)
+		return r;
+
+	/* validate the atpx setup */
+	r = amdgpu_atpx_validate(&amdgpu_atpx_priv.atpx);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+/**
+ * amdgpu_atpx_get_client_id - get the client id
+ *
+ * @pdev: pci device
+ *
+ * look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
+static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+{
+	if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
+		return VGA_SWITCHEROO_IGD;
+	else
+		return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler amdgpu_atpx_handler = {
+	.switchto = amdgpu_atpx_switchto,
+	.power_state = amdgpu_atpx_power_state,
+	.init = amdgpu_atpx_init,
+	.get_client_id = amdgpu_atpx_get_client_id,
+};
+
+/**
+ * amdgpu_atpx_detect - detect whether we have PX
+ *
+ * Check if we have a PX system (all asics).
+ * Returns true if we have a PX system, false if not.
+ */
+static bool amdgpu_atpx_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	bool has_atpx = false;
+	int vga_count = 0;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+	}
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+	}
+
+	if (has_atpx && vga_count == 2) {
+		acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
+		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+		       acpi_method_name);
+		amdgpu_atpx_priv.atpx_detected = true;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * amdgpu_register_atpx_handler - register with vga_switcheroo
+ *
+ * Register the PX callbacks with vga_switcheroo (all asics).
+ */
+void amdgpu_register_atpx_handler(void)
+{
+	bool r;
+
+	/* detect if we have any ATPX + 2 VGA in the system */
+	r = amdgpu_atpx_detect();
+	if (!r)
+		return;
+
+	vga_switcheroo_register_handler(&amdgpu_atpx_handler);
+}
+
+/**
+ * amdgpu_unregister_atpx_handler - unregister with vga_switcheroo
+ *
+ * Unregister the PX callbacks with vga_switcheroo (all asics).
+ */
+void amdgpu_unregister_atpx_handler(void)
+{
+	vga_switcheroo_unregister_handler();
+}
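
An illustrative aside, not part of the patch: the ATPX argument structs are declared packed so their sizes match the byte counts the handlers pass as input.size (3 for power control, 4 for the mux/notification calls). A standalone check of that layout; only the struct shapes are copied here, nothing touches ACPI.

#include <stdio.h>
#include <stdint.h>

struct atpx_power_control {
	uint16_t size;
	uint8_t dgpu_state;
} __attribute__((packed));

struct atpx_mux {
	uint16_t size;
	uint16_t mux;
} __attribute__((packed));

int main(void)
{
	/* expected output: "power_control: 3 bytes, mux: 4 bytes" */
	printf("power_control: %zu bytes, mux: %zu bytes\n",
	       sizeof(struct atpx_power_control), sizeof(struct atpx_mux));
	return 0;
}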

+ 221 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c

@@ -0,0 +1,221 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#define AMDGPU_BENCHMARK_ITERATIONS 1024
+#define AMDGPU_BENCHMARK_COMMON_MODES_N 17
+
+static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
+				    uint64_t saddr, uint64_t daddr, int n)
+{
+	unsigned long start_jiffies;
+	unsigned long end_jiffies;
+	struct amdgpu_fence *fence = NULL;
+	int i, r;
+
+	start_jiffies = jiffies;
+	for (i = 0; i < n; i++) {
+		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+		if (r)
+			goto exit_do_move;
+		r = amdgpu_fence_wait(fence, false);
+		if (r)
+			goto exit_do_move;
+		amdgpu_fence_unref(&fence);
+	}
+	end_jiffies = jiffies;
+	r = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+exit_do_move:
+	if (fence)
+		amdgpu_fence_unref(&fence);
+	return r;
+}
+
+
+static void amdgpu_benchmark_log_results(int n, unsigned size,
+					 unsigned int time,
+					 unsigned sdomain, unsigned ddomain,
+					 char *kind)
+{
+	unsigned int throughput = (n * (size >> 10)) / time;
+	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
+		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
+		 kind, n, size >> 10, sdomain, ddomain, time,
+		 throughput * 8, throughput);
+}
+
+static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+				  unsigned sdomain, unsigned ddomain)
+{
+	struct amdgpu_bo *dobj = NULL;
+	struct amdgpu_bo *sobj = NULL;
+	uint64_t saddr, daddr;
+	int r, n;
+	int time;
+
+	n = AMDGPU_BENCHMARK_ITERATIONS;
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = amdgpu_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+	amdgpu_bo_unreserve(sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = amdgpu_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+	amdgpu_bo_unreserve(dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+
+	if (adev->mman.buffer_funcs) {
+		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			amdgpu_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "dma");
+	}
+
+out_cleanup:
+	if (sobj) {
+		r = amdgpu_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_unpin(sobj);
+			amdgpu_bo_unreserve(sobj);
+		}
+		amdgpu_bo_unref(&sobj);
+	}
+	if (dobj) {
+		r = amdgpu_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_unpin(dobj);
+			amdgpu_bo_unreserve(dobj);
+		}
+		amdgpu_bo_unref(&dobj);
+	}
+
+	if (r) {
+		DRM_ERROR("Error while benchmarking BO move.\n");
+	}
+}
+
+void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
+{
+	int i;
+	int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
+		640 * 480 * 4,
+		720 * 480 * 4,
+		800 * 600 * 4,
+		848 * 480 * 4,
+		1024 * 768 * 4,
+		1152 * 768 * 4,
+		1280 * 720 * 4,
+		1280 * 800 * 4,
+		1280 * 854 * 4,
+		1280 * 960 * 4,
+		1280 * 1024 * 4,
+		1440 * 900 * 4,
+		1400 * 1050 * 4,
+		1680 * 1050 * 4,
+		1600 * 1200 * 4,
+		1920 * 1080 * 4,
+		1920 * 1200 * 4
+	};
+
+	switch (test_number) {
+	case 1:
+		/* simple test, VRAM to GTT and GTT to VRAM */
+		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
+				      AMDGPU_GEM_DOMAIN_VRAM);
+		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+				      AMDGPU_GEM_DOMAIN_GTT);
+		break;
+	case 2:
+		/* simple test, VRAM to VRAM */
+		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+				      AMDGPU_GEM_DOMAIN_VRAM);
+		break;
+	case 3:
+		/* GTT to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_GTT,
+					      AMDGPU_GEM_DOMAIN_VRAM);
+		break;
+	case 4:
+		/* VRAM to GTT, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      AMDGPU_GEM_DOMAIN_GTT);
+		break;
+	case 5:
+		/* VRAM to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      AMDGPU_GEM_DOMAIN_VRAM);
+		break;
+	case 6:
+		/* GTT to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
+			amdgpu_benchmark_move(adev, common_modes[i],
+					      AMDGPU_GEM_DOMAIN_GTT,
+					      AMDGPU_GEM_DOMAIN_VRAM);
+		break;
+	case 7:
+		/* VRAM to GTT, buffer size sweep, common modes */
+		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
+			amdgpu_benchmark_move(adev, common_modes[i],
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      AMDGPU_GEM_DOMAIN_GTT);
+		break;
+	case 8:
+		/* VRAM to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
+			amdgpu_benchmark_move(adev, common_modes[i],
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      AMDGPU_GEM_DOMAIN_VRAM);
+		break;
+
+	default:
+		DRM_ERROR("Unknown benchmark\n");
+	}
+}
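
An illustrative aside, not part of the patch: amdgpu_benchmark_do_move() returns the elapsed time in milliseconds, and amdgpu_benchmark_log_results() divides kilobytes moved by that time, so the printed number is already MB/s (KB per ms). A standalone sketch of the arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int n = 1024;			/* AMDGPU_BENCHMARK_ITERATIONS */
	unsigned int size = 1024 * 1024;	/* hypothetical: 1 MiB per copy */
	unsigned int time_ms = 512;		/* hypothetical elapsed time */
	unsigned int throughput = (n * (size >> 10)) / time_ms;

	/* 1024 copies * 1024 KB / 512 ms = 2048 MB/s (16384 Mb/s) */
	printf("%u MB/s or %u Mb/s\n", throughput, throughput * 8);
	return 0;
}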

+ 363 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c

@@ -0,0 +1,363 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "atom.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+/*
+ * BIOS.
+ */
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar as the IGP rom is
+ * part of the system bios.  On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
+static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
+{
+	uint8_t __iomem *bios;
+	resource_size_t vram_base;
+	resource_size_t size = 256 * 1024; /* ??? */
+
+	if (!(adev->flags & AMDGPU_IS_APU))
+		if (!amdgpu_card_posted(adev))
+			return false;
+
+	adev->bios = NULL;
+	vram_base = pci_resource_start(adev->pdev, 0);
+	bios = ioremap(vram_base, size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		iounmap(bios);
+		return false;
+	}
+	adev->bios = kmalloc(size, GFP_KERNEL);
+	if (adev->bios == NULL) {
+		iounmap(bios);
+		return false;
+	}
+	memcpy_fromio(adev->bios, bios, size);
+	iounmap(bios);
+	return true;
+}
+
+bool amdgpu_read_bios(struct amdgpu_device *adev)
+{
+	uint8_t __iomem *bios, val1, val2;
+	size_t size;
+
+	adev->bios = NULL;
+	/* XXX: some cards may return 0 for rom size? ddx has a workaround */
+	bios = pci_map_rom(adev->pdev, &size);
+	if (!bios) {
+		return false;
+	}
+
+	val1 = readb(&bios[0]);
+	val2 = readb(&bios[1]);
+
+	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
+		pci_unmap_rom(adev->pdev, bios);
+		return false;
+	}
+	adev->bios = kzalloc(size, GFP_KERNEL);
+	if (adev->bios == NULL) {
+		pci_unmap_rom(adev->pdev, bios);
+		return false;
+	}
+	memcpy_fromio(adev->bios, bios, size);
+	pci_unmap_rom(adev->pdev, bios);
+	return true;
+}
+
+static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
+{
+	uint8_t __iomem *bios;
+	size_t size;
+
+	adev->bios = NULL;
+
+	bios = pci_platform_rom(adev->pdev, &size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		return false;
+	}
+	adev->bios = kmemdup(bios, size, GFP_KERNEL);
+	if (adev->bios == NULL) {
+		return false;
+	}
+
+	return true;
+}
+
+#ifdef CONFIG_ACPI
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * amdgpu_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object atrm_arg_elements[2], *obj;
+	struct acpi_object_list atrm_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	atrm_arg.count = 2;
+	atrm_arg.pointer = &atrm_arg_elements[0];
+
+	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[0].integer.value = offset;
+
+	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+	len = obj->buffer.length;
+	kfree(buffer.pointer);
+	return len;
+}
+
+static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+{
+	int ret;
+	int size = 256 * 1024;
+	int i;
+	struct pci_dev *pdev = NULL;
+	acpi_handle dhandle, atrm_handle;
+	acpi_status status;
+	bool found = false;
+
+	/* ATRM is for the discrete card only */
+	if (adev->flags & AMDGPU_IS_APU)
+		return false;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		dhandle = ACPI_HANDLE(&pdev->dev);
+		if (!dhandle)
+			continue;
+
+		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+		if (!ACPI_FAILURE(status)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found)
+		return false;
+
+	adev->bios = kmalloc(size, GFP_KERNEL);
+	if (!adev->bios) {
+		DRM_ERROR("Unable to allocate bios\n");
+		return false;
+	}
+
+	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+		ret = amdgpu_atrm_call(atrm_handle,
+				       adev->bios,
+				       (i * ATRM_BIOS_PAGE),
+				       ATRM_BIOS_PAGE);
+		if (ret < ATRM_BIOS_PAGE)
+			break;
+	}
+
+	if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
+		kfree(adev->bios);
+		return false;
+	}
+	return true;
+}
+#else
+static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+{
+	return false;
+}
+#endif
+
+static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMDGPU_IS_APU)
+		return igp_read_bios_from_vram(adev);
+	else
+		return amdgpu_asic_read_disabled_bios(adev);
+}
+
+#ifdef CONFIG_ACPI
+static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
+{
+	bool ret = false;
+	struct acpi_table_header *hdr;
+	acpi_size tbl_size;
+	UEFI_ACPI_VFCT *vfct;
+	GOP_VBIOS_CONTENT *vbios;
+	VFCT_IMAGE_HEADER *vhdr;
+
+	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+		return false;
+	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+		goto out_unmap;
+	}
+
+	vfct = (UEFI_ACPI_VFCT *)hdr;
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+		goto out_unmap;
+	}
+
+	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+	vhdr = &vbios->VbiosHeader;
+	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+			vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+			vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+	if (vhdr->PCIBus != adev->pdev->bus->number ||
+	    vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) ||
+	    vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) ||
+	    vhdr->VendorID != adev->pdev->vendor ||
+	    vhdr->DeviceID != adev->pdev->device) {
+		DRM_INFO("ACPI VFCT table is not for this card\n");
+		goto out_unmap;
+	}
+
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+		DRM_ERROR("ACPI VFCT image truncated\n");
+		goto out_unmap;
+	}
+
+	adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+	ret = !!adev->bios;
+
+out_unmap:
+	return ret;
+}
+#else
+static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
+{
+	return false;
+}
+#endif
+
+bool amdgpu_get_bios(struct amdgpu_device *adev)
+{
+	bool r;
+	uint16_t tmp;
+
+	r = amdgpu_atrm_get_bios(adev);
+	if (r == false)
+		r = amdgpu_acpi_vfct_bios(adev);
+	if (r == false)
+		r = igp_read_bios_from_vram(adev);
+	if (r == false)
+		r = amdgpu_read_bios(adev);
+	if (r == false) {
+		r = amdgpu_read_disabled_bios(adev);
+	}
+	if (r == false) {
+		r = amdgpu_read_platform_bios(adev);
+	}
+	if (r == false || adev->bios == NULL) {
+		DRM_ERROR("Unable to locate a BIOS ROM\n");
+		adev->bios = NULL;
+		return false;
+	}
+	if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
+		printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
+		goto free_bios;
+	}
+
+	tmp = RBIOS16(0x18);
+	if (RBIOS8(tmp + 0x14) != 0x0) {
+		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
+		goto free_bios;
+	}
+
+	adev->bios_header_start = RBIOS16(0x48);
+	if (!adev->bios_header_start) {
+		goto free_bios;
+	}
+	tmp = adev->bios_header_start + 4;
+	if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
+	    !memcmp(adev->bios + tmp, "MOTA", 4)) {
+		adev->is_atom_bios = true;
+	} else {
+		adev->is_atom_bios = false;
+	}
+
+	DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM");
+	return true;
+free_bios:
+	kfree(adev->bios);
+	adev->bios = NULL;
+	return false;
+}
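
An illustrative aside, not part of the patch: a standalone sketch of the signature checks amdgpu_get_bios() applies to a candidate image -- the 0x55 0xaa option-ROM signature, then the "ATOM"/"MOTA" magic four bytes past the header pointer stored at offset 0x48. The helper name and sample ROM are made up, and the x86/PCIR check at offset 0x18 is omitted for brevity.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int looks_like_atom_bios(const uint8_t *bios, size_t len)
{
	uint16_t hdr;

	if (len < 0x4a || bios[0] != 0x55 || bios[1] != 0xaa)
		return 0;
	hdr = bios[0x48] | (bios[0x49] << 8);	/* bios_header_start */
	if (!hdr || hdr + 8 > len)
		return 0;
	return !memcmp(bios + hdr + 4, "ATOM", 4) ||
	       !memcmp(bios + hdr + 4, "MOTA", 4);
}

int main(void)
{
	uint8_t rom[0x100] = { 0x55, 0xaa };

	rom[0x48] = 0x60;			/* header block at 0x60 */
	memcpy(&rom[0x60 + 4], "ATOM", 4);
	printf("atom bios: %d\n", looks_like_atom_bios(rom, sizeof(rom)));
	return 0;
}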

+ 268 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -0,0 +1,268 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
+				 struct amdgpu_bo_list **result,
+				 int *id)
+{
+	int r;
+
+	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+	if (!*result)
+		return -ENOMEM;
+
+	mutex_lock(&fpriv->bo_list_lock);
+	r = idr_alloc(&fpriv->bo_list_handles, *result,
+		      1, 0, GFP_KERNEL);
+	if (r < 0) {
+		mutex_unlock(&fpriv->bo_list_lock);
+		kfree(*result);
+		return r;
+	}
+	*id = r;
+
+	mutex_init(&(*result)->lock);
+	(*result)->num_entries = 0;
+	(*result)->array = NULL;
+
+	mutex_lock(&(*result)->lock);
+	mutex_unlock(&fpriv->bo_list_lock);
+
+	return 0;
+}
+
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
+{
+	struct amdgpu_bo_list *list;
+
+	mutex_lock(&fpriv->bo_list_lock);
+	list = idr_find(&fpriv->bo_list_handles, id);
+	if (list) {
+		mutex_lock(&list->lock);
+		idr_remove(&fpriv->bo_list_handles, id);
+		mutex_unlock(&list->lock);
+		amdgpu_bo_list_free(list);
+	}
+	mutex_unlock(&fpriv->bo_list_lock);
+}
+
+static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+				     struct drm_file *filp,
+				     struct amdgpu_bo_list *list,
+				     struct drm_amdgpu_bo_list_entry *info,
+				     unsigned num_entries)
+{
+	struct amdgpu_bo_list_entry *array;
+	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
+	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
+	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
+
+	bool has_userptr = false;
+	unsigned i;
+
+	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
+	if (!array)
+		return -ENOMEM;
+	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
+
+	for (i = 0; i < num_entries; ++i) {
+		struct amdgpu_bo_list_entry *entry = &array[i];
+		struct drm_gem_object *gobj;
+
+		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
+		if (!gobj)
+			goto error_free;
+
+		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+		drm_gem_object_unreference_unlocked(gobj);
+		entry->priority = info[i].bo_priority;
+		entry->prefered_domains = entry->robj->initial_domain;
+		entry->allowed_domains = entry->prefered_domains;
+		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
+			has_userptr = true;
+			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+		}
+		entry->tv.bo = &entry->robj->tbo;
+		entry->tv.shared = true;
+
+		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+			gds_obj = entry->robj;
+		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
+			gws_obj = entry->robj;
+		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
+			oa_obj = entry->robj;
+	}
+
+	for (i = 0; i < list->num_entries; ++i)
+		amdgpu_bo_unref(&list->array[i].robj);
+
+	drm_free_large(list->array);
+
+	list->gds_obj = gds_obj;
+	list->gws_obj = gws_obj;
+	list->oa_obj = oa_obj;
+	list->has_userptr = has_userptr;
+	list->array = array;
+	list->num_entries = num_entries;
+
+	return 0;
+
+error_free:
+	drm_free_large(array);
+	return -ENOENT;
+}
+
+struct amdgpu_bo_list *
+amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+{
+	struct amdgpu_bo_list *result;
+
+	mutex_lock(&fpriv->bo_list_lock);
+	result = idr_find(&fpriv->bo_list_handles, id);
+	if (result)
+		mutex_lock(&result->lock);
+	mutex_unlock(&fpriv->bo_list_lock);
+	return result;
+}
+
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+{
+	mutex_unlock(&list->lock);
+}
+
+void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
+{
+	unsigned i;
+
+	for (i = 0; i < list->num_entries; ++i)
+		amdgpu_bo_unref(&list->array[i].robj);
+
+	mutex_destroy(&list->lock);
+	drm_free_large(list->array);
+	kfree(list);
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	union drm_amdgpu_bo_list *args = data;
+	uint32_t handle = args->in.list_handle;
+	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
+
+	struct drm_amdgpu_bo_list_entry *info;
+	struct amdgpu_bo_list *list;
+
+	int r;
+
+	info = drm_malloc_ab(args->in.bo_number,
+			     sizeof(struct drm_amdgpu_bo_list_entry));
+	if (!info)
+		return -ENOMEM;
+
+	/* copy the handle array from userspace to a kernel buffer */
+	r = -EFAULT;
+	if (likely(info_size == args->in.bo_info_size)) {
+		unsigned long bytes = args->in.bo_number *
+			args->in.bo_info_size;
+
+		if (copy_from_user(info, uptr, bytes))
+			goto error_free;
+
+	} else {
+		unsigned long bytes = min(args->in.bo_info_size, info_size);
+		unsigned i;
+
+		memset(info, 0, args->in.bo_number * info_size);
+		for (i = 0; i < args->in.bo_number; ++i) {
+			if (copy_from_user(&info[i], uptr, bytes))
+				goto error_free;
+			
+			uptr += args->in.bo_info_size;
+		}
+	}
+
+	switch (args->in.operation) {
+	case AMDGPU_BO_LIST_OP_CREATE:
+		r = amdgpu_bo_list_create(fpriv, &list, &handle);
+		if (r) 
+			goto error_free;
+
+		r = amdgpu_bo_list_set(adev, filp, list, info,
+					      args->in.bo_number);
+		amdgpu_bo_list_put(list);
+		if (r)
+			goto error_free;
+
+		break;
+		
+	case AMDGPU_BO_LIST_OP_DESTROY:
+		amdgpu_bo_list_destroy(fpriv, handle);
+		handle = 0;
+		break;
+
+	case AMDGPU_BO_LIST_OP_UPDATE:
+		r = -ENOENT;
+		list = amdgpu_bo_list_get(fpriv, handle);
+		if (!list)
+			goto error_free;
+
+		r = amdgpu_bo_list_set(adev, filp, list, info,
+					      args->in.bo_number);
+		amdgpu_bo_list_put(list);
+		if (r)
+			goto error_free;
+
+		break;
+
+	default:
+		r = -EINVAL;
+		goto error_free;
+	}
+
+	memset(args, 0, sizeof(*args));
+	args->out.list_handle = handle;
+	drm_free_large(info);
+
+	return 0;
+
+error_free:
+	drm_free_large(info);
+	return r;
+}
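
An illustrative aside, not part of the patch: a hedged user-space sketch of driving the ioctl above to create a one-entry BO list. It assumes the amdgpu UAPI header (drm/amdgpu_drm.h) and an already-open render-node fd; the wrapper name and error handling are made up, and bo_handle must be a GEM handle the caller already owns.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int bo_list_create(int fd, uint32_t bo_handle, uint32_t *list_handle)
{
	struct drm_amdgpu_bo_list_entry entry = {
		.bo_handle = bo_handle,
		.bo_priority = 0,
	};
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = 1;
	args.in.bo_info_size = sizeof(entry);
	args.in.bo_info_ptr = (uintptr_t)&entry;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
		return -1;	/* errno set by the kernel */
	*list_handle = args.out.list_handle;
	return 0;
}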

+ 1907 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c

@@ -0,0 +1,1907 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "atom.h"
+#include "atombios_encoders.h"
+#include "atombios_dp.h"
+#include "amdgpu_connectors.h"
+#include "amdgpu_i2c.h"
+
+#include <linux/pm_runtime.h>
+
+void amdgpu_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	/* bail if the connector does not have hpd pin, e.g.,
+	 * VGA, TV, etc.
+	 */
+	if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE)
+		return;
+
+	amdgpu_display_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+
+	/* if the connector is already off, don't turn it back on */
+	if (connector->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	/* just deal with DP (not eDP) here. */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct amdgpu_connector_atom_dig *dig_connector =
+			amdgpu_connector->con_priv;
+
+		/* if existing sink type was not DP no need to retrain */
+		if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return;
+
+		/* first get sink type as it may be reset after (un)plug */
+		dig_connector->dp_sink_type = amdgpu_atombios_dp_get_sinktype(amdgpu_connector);
+		/* don't do anything if sink is not display port, i.e.,
+		 * passive dp->(dvi|hdmi) adaptor
+		 */
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			int saved_dpms = connector->dpms;
+			/* Only turn off the display if it's physically disconnected */
+			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+				/* set it to OFF so that drm_helper_connector_dpms()
+				 * won't return immediately since the current state
+				 * is ON at this point.
+				 */
+				connector->dpms = DRM_MODE_DPMS_OFF;
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+			}
+			connector->dpms = saved_dpms;
+		}
+	}
+}
+
+static void amdgpu_connector_property_change_mode(struct drm_encoder *encoder)
+{
+	struct drm_crtc *crtc = encoder->crtc;
+
+	if (crtc && crtc->enabled) {
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y, crtc->primary->fb);
+	}
+}
+
+int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *dig_connector;
+	int bpc = 8;
+	unsigned mode_clock, max_tmds_clock;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (amdgpu_connector->use_digital) {
+			if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+				if (connector->display_info.bpc)
+					bpc = connector->display_info.bpc;
+			}
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = amdgpu_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+		    drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_LVDS:
+		if (connector->display_info.bpc)
+			bpc = connector->display_info.bpc;
+		else {
+			const struct drm_connector_helper_funcs *connector_funcs =
+				connector->helper_private;
+			struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+			struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+			struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+			if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+				bpc = 6;
+			else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+				bpc = 8;
+		}
+		break;
+	}
+
+	if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+		/*
+		 * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
+		 * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
+		 * 12 bpc is always supported on hdmi deep color sinks, as this is
+		 * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
+		 */
+		if (bpc > 12) {
+			DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
+				  connector->name, bpc);
+			bpc = 12;
+		}
+
+		/* Any defined maximum tmds clock limit we must not exceed? */
+		if (connector->max_tmds_clock > 0) {
+			/* mode_clock is clock in kHz for mode to be modeset on this connector */
+			mode_clock = amdgpu_connector->pixelclock_for_modeset;
+
+			/* Maximum allowable input clock in kHz */
+			max_tmds_clock = connector->max_tmds_clock * 1000;
+
+			DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
+				  connector->name, mode_clock, max_tmds_clock);
+
+			/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
+			if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
+				if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+				    (mode_clock * 5/4 <= max_tmds_clock))
+					bpc = 10;
+				else
+					bpc = 8;
+
+				DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
+					  connector->name, bpc);
+			}
+
+			if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
+				bpc = 8;
+				DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
+					  connector->name, bpc);
+			} else if (bpc > 8) {
+				/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+				DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+					  connector->name);
+				bpc = 8;
+			}
+		}
+	}
+
+	if ((amdgpu_deep_color == 0) && (bpc > 8)) {
+		DRM_DEBUG("%s: Deep color disabled. Set amdgpu module param deep_color=1 to enable.\n",
+			  connector->name);
+		bpc = 8;
+	}
+
+	DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
+		  connector->name, connector->display_info.bpc, bpc);
+
+	return bpc;
+}
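
An illustrative aside, not part of the patch: the deep-color clamp above scales the mode clock by the ratio of the requested bpc to 8 (12 bpc -> 3/2, 10 bpc -> 5/4) and compares it against the sink's maximum TMDS clock. A standalone sketch of that arithmetic with made-up clocks:

#include <stdio.h>

int main(void)
{
	unsigned int mode_clock = 148500;	/* kHz, hypothetical 1080p mode */
	unsigned int max_tmds_clock = 200000;	/* kHz, hypothetical sink limit */

	/* 12 bpc needs mode_clock * 3/2, 10 bpc needs mode_clock * 5/4 */
	printf("12 bpc needs %u kHz -> %s\n", mode_clock * 3 / 2,
	       mode_clock * 3 / 2 <= max_tmds_clock ? "ok" : "too fast");
	printf("10 bpc needs %u kHz -> %s\n", mode_clock * 5 / 4,
	       mode_clock * 5 / 4 <= max_tmds_clock ? "ok" : "too fast");
	return 0;
}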
+
+static void
+amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
+				      enum drm_connector_status status)
+{
+	struct drm_encoder *best_encoder = NULL;
+	struct drm_encoder *encoder = NULL;
+	const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+	bool connected;
+	int i;
+
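+	/* Flag only the currently active (best) encoder as connected in the
+	 * BIOS scratch registers; all other encoders routed to this connector
+	 * are marked disconnected.
+	 */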
+	best_encoder = connector_funcs->best_encoder(connector);
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		encoder = drm_encoder_find(connector->dev,
+					connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		if ((encoder == best_encoder) && (status == connector_status_connected))
+			connected = true;
+		else
+			connected = false;
+
+		amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
+
+	}
+}
+
+static struct drm_encoder *
+amdgpu_connector_find_encoder(struct drm_connector *connector,
+			       int encoder_type)
+{
+	struct drm_encoder *encoder;
+	int i;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+		encoder = drm_encoder_find(connector->dev,
+					connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		if (encoder->encoder_type == encoder_type)
+			return encoder;
+	}
+	return NULL;
+}
+
+struct edid *amdgpu_connector_edid(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
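+	/* Prefer the EDID the driver read from the sink; otherwise fall back
+	 * to an EDID blob already attached to the connector (e.g. a user
+	 * supplied override) and cache a copy of it.
+	 */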
+	if (amdgpu_connector->edid) {
+		return amdgpu_connector->edid;
+	} else if (edid_blob) {
+		struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+		if (edid)
+			amdgpu_connector->edid = edid;
+	}
+	return amdgpu_connector->edid;
+}
+
+static struct edid *
+amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
+{
+	struct edid *edid;
+
+	if (adev->mode_info.bios_hardcoded_edid) {
+		edid = kmalloc(adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
+		if (edid) {
+			memcpy((unsigned char *)edid,
+			       (unsigned char *)adev->mode_info.bios_hardcoded_edid,
+			       adev->mode_info.bios_hardcoded_edid_size);
+			return edid;
+		}
+	}
+	return NULL;
+}
+
+static void amdgpu_connector_get_edid(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	if (amdgpu_connector->edid)
+		return;
+
+	/* on hw with routers, select right port */
+	if (amdgpu_connector->router.ddc_valid)
+		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
+
+	if ((amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+	     ENCODER_OBJECT_ID_NONE) &&
+	    amdgpu_connector->ddc_bus->has_aux) {
+		amdgpu_connector->edid = drm_get_edid(connector,
+						      &amdgpu_connector->ddc_bus->aux.ddc);
+	} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+		   (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+		struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
+
+		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+		    amdgpu_connector->ddc_bus->has_aux)
+			amdgpu_connector->edid = drm_get_edid(connector,
+							      &amdgpu_connector->ddc_bus->aux.ddc);
+		else if (amdgpu_connector->ddc_bus)
+			amdgpu_connector->edid = drm_get_edid(connector,
+							      &amdgpu_connector->ddc_bus->adapter);
+	} else if (amdgpu_connector->ddc_bus) {
+		amdgpu_connector->edid = drm_get_edid(connector,
+						      &amdgpu_connector->ddc_bus->adapter);
+	}
+
+	if (!amdgpu_connector->edid) {
+		/* some laptops provide a hardcoded edid in rom for LCDs */
+		if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+		     (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+			amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
+	}
+}
+
+static void amdgpu_connector_free_edid(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	if (amdgpu_connector->edid) {
+		kfree(amdgpu_connector->edid);
+		amdgpu_connector->edid = NULL;
+	}
+}
+
+static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	int ret;
+
+	if (amdgpu_connector->edid) {
+		drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+		ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
+		drm_edid_to_eld(connector, amdgpu_connector->edid);
+		return ret;
+	}
+	drm_mode_connector_update_edid_property(connector, NULL);
+	return 0;
+}
+
+static struct drm_encoder *
+amdgpu_connector_best_single_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+
+	/* pick the encoder ids */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+	return NULL;
+}
+
+static void amdgpu_get_native_mode(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	if (encoder == NULL)
+		return;
+
+	amdgpu_encoder = to_amdgpu_encoder(encoder);
+
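+	/* Remember the first (preferred) probed mode as this encoder's native
+	 * mode, or mark it invalid (clock = 0) when nothing was probed.
+	 */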
+	if (!list_empty(&connector->probed_modes)) {
+		struct drm_display_mode *preferred_mode =
+			list_first_entry(&connector->probed_modes,
+					 struct drm_display_mode, head);
+
+		amdgpu_encoder->native_mode = *preferred_mode;
+	} else {
+		amdgpu_encoder->native_mode.clock = 0;
+	}
+}
+
+static struct drm_display_mode *
+amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+	if (native_mode->hdisplay != 0 &&
+	    native_mode->vdisplay != 0 &&
+	    native_mode->clock != 0) {
+		mode = drm_mode_duplicate(dev, native_mode);
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		drm_mode_set_name(mode);
+
+		DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
+	} else if (native_mode->hdisplay != 0 &&
+		   native_mode->vdisplay != 0) {
+		/* mac laptops without an edid */
+		/* Note that this is not necessarily the exact panel mode,
+		 * but an approximation based on the cvt formula.  For these
+		 * systems we should ideally read the mode info out of the
+		 * registers or add a mode table, but this works and is much
+		 * simpler.
+		 */
+		mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
+	}
+	return mode;
+}
+
+static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
+					       struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+	int i;
+	struct mode_size {
+		int w;
+		int h;
+	} common_modes[17] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
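+	/* Add 60Hz CVT modes from the table above, skipping anything larger
+	 * than 1024x768 on TV outputs, anything that exceeds or exactly
+	 * matches the panel's native size on LCDs, and anything under 320x200.
+	 */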
+	for (i = 0; i < 17; i++) {
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+			if (common_modes[i].w > 1024 ||
+			    common_modes[i].h > 768)
+				continue;
+		}
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			if (common_modes[i].w > native_mode->hdisplay ||
+			    common_modes[i].h > native_mode->vdisplay ||
+			    (common_modes[i].w == native_mode->hdisplay &&
+			     common_modes[i].h == native_mode->vdisplay))
+				continue;
+		}
+		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+			continue;
+
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		drm_mode_probed_add(connector, mode);
+	}
+}
+
+static int amdgpu_connector_set_property(struct drm_connector *connector,
+					  struct drm_property *property,
+					  uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	if (property == adev->mode_info.coherent_mode_property) {
+		struct amdgpu_encoder_atom_dig *dig;
+		bool new_coherent_mode;
+
+		/* need to find digital encoder on connector */
+		encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		if (!amdgpu_encoder->enc_priv)
+			return 0;
+
+		dig = amdgpu_encoder->enc_priv;
+		new_coherent_mode = val ? true : false;
+		if (dig->coherent_mode != new_coherent_mode) {
+			dig->coherent_mode = new_coherent_mode;
+			amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+		}
+	}
+
+	if (property == adev->mode_info.audio_property) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		if (amdgpu_connector->audio != val) {
+			amdgpu_connector->audio = val;
+			amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+		}
+	}
+
+	if (property == adev->mode_info.dither_property) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		if (amdgpu_connector->dither != val) {
+			amdgpu_connector->dither = val;
+			amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+		}
+	}
+
+	if (property == adev->mode_info.underscan_property) {
+		/* need to find digital encoder on connector */
+		encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		if (amdgpu_encoder->underscan_type != val) {
+			amdgpu_encoder->underscan_type = val;
+			amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+		}
+	}
+
+	if (property == adev->mode_info.underscan_hborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		if (amdgpu_encoder->underscan_hborder != val) {
+			amdgpu_encoder->underscan_hborder = val;
+			amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+		}
+	}
+
+	if (property == adev->mode_info.underscan_vborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		if (amdgpu_encoder->underscan_vborder != val) {
+			amdgpu_encoder->underscan_vborder = val;
+			amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+		}
+	}
+
+	if (property == adev->mode_info.load_detect_property) {
+		struct amdgpu_connector *amdgpu_connector =
+			to_amdgpu_connector(connector);
+
+		if (val == 0)
+			amdgpu_connector->dac_load_detect = false;
+		else
+			amdgpu_connector->dac_load_detect = true;
+	}
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		enum amdgpu_rmx_type rmx_type;
+
+		if (connector->encoder) {
+			amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
+		} else {
+			const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+			amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
+		}
+
+		switch (val) {
+		default:
+		case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+		case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+		case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+		case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+		}
+		if (amdgpu_encoder->rmx_type == rmx_type)
+			return 0;
+
+		if ((rmx_type != RMX_OFF) &&
+		    (amdgpu_encoder->native_mode.clock == 0))
+			return 0;
+
+		amdgpu_encoder->rmx_type = rmx_type;
+
+		amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+	}
+
+	return 0;
+}
+
+static void
+amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
+					struct drm_connector *connector)
+{
+	struct amdgpu_encoder *amdgpu_encoder =	to_amdgpu_encoder(encoder);
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+	struct drm_display_mode *t, *mode;
+
+	/* If the EDID preferred mode doesn't match the native mode, use it */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			if (mode->hdisplay != native_mode->hdisplay ||
+			    mode->vdisplay != native_mode->vdisplay)
+				memcpy(native_mode, mode, sizeof(*mode));
+		}
+	}
+
+	/* Try to get native mode details from EDID if necessary */
+	if (!native_mode->clock) {
+		list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+			if (mode->hdisplay == native_mode->hdisplay &&
+			    mode->vdisplay == native_mode->vdisplay) {
+				*native_mode = *mode;
+				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
+				break;
+			}
+		}
+	}
+
+	if (!native_mode->clock) {
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
+		amdgpu_encoder->rmx_type = RMX_OFF;
+	}
+}
+
+static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	int ret = 0;
+	struct drm_display_mode *mode;
+
+	amdgpu_connector_get_edid(connector);
+	ret = amdgpu_connector_ddc_get_modes(connector);
+	if (ret > 0) {
+		encoder = amdgpu_connector_best_single_encoder(connector);
+		if (encoder) {
+			amdgpu_connector_fixup_lcd_native_mode(encoder, connector);
+			/* add scaled modes */
+			amdgpu_connector_add_common_modes(encoder, connector);
+		}
+		return ret;
+	}
+
+	encoder = amdgpu_connector_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* we have no EDID modes */
+	mode = amdgpu_connector_lcd_native_mode(encoder);
+	if (mode) {
+		ret = 1;
+		drm_mode_probed_add(connector, mode);
+		/* add the width/height from vbios tables if available */
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
+		/* add scaled modes */
+		amdgpu_connector_add_common_modes(encoder, connector);
+	}
+
+	return ret;
+}
+
+static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+					     struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+	if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+		return MODE_PANEL;
+
+	if (encoder) {
+		struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+		struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+		/* AVIVO hardware supports downscaling modes larger than the panel
+		 * to the panel size, but I'm not sure this is desirable.
+		 */
+		if ((mode->hdisplay > native_mode->hdisplay) ||
+		    (mode->vdisplay > native_mode->vdisplay))
+			return MODE_PANEL;
+
+		/* if scaling is disabled, block non-native modes */
+		if (amdgpu_encoder->rmx_type == RMX_OFF) {
+			if ((mode->hdisplay != native_mode->hdisplay) ||
+			    (mode->vdisplay != native_mode->vdisplay))
+				return MODE_PANEL;
+		}
+	}
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (encoder) {
+		struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+		struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+		/* check if panel is valid */
+		if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+			ret = connector_status_connected;
+
+	}
+
+	/* check for edid as well */
+	amdgpu_connector_get_edid(connector);
+	if (amdgpu_connector->edid)
+		ret = connector_status_connected;
+	/* check acpi lid status ??? */
+
+	amdgpu_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+	return ret;
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	if (amdgpu_connector->ddc_bus->has_aux)
+		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
+	amdgpu_connector_free_edid(connector);
+	kfree(amdgpu_connector->con_priv);
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
+					      struct drm_property *property,
+					      uint64_t value)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_encoder *amdgpu_encoder;
+	enum amdgpu_rmx_type rmx_type;
+
+	DRM_DEBUG_KMS("\n");
+	if (property != dev->mode_config.scaling_mode_property)
+		return 0;
+
+	if (connector->encoder)
+		amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
+	else {
+		const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+		amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
+	}
+
+	switch (value) {
+	case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+	case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+	case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+	default:
+	case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+	}
+	if (amdgpu_encoder->rmx_type == rmx_type)
+		return 0;
+
+	amdgpu_encoder->rmx_type = rmx_type;
+
+	amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
+	return 0;
+}
+
+
+static const struct drm_connector_helper_funcs amdgpu_connector_lvds_helper_funcs = {
+	.get_modes = amdgpu_connector_lvds_get_modes,
+	.mode_valid = amdgpu_connector_lvds_mode_valid,
+	.best_encoder = amdgpu_connector_best_single_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = amdgpu_connector_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = amdgpu_connector_destroy,
+	.set_property = amdgpu_connector_set_lcd_property,
+};
+
+static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
+{
+	int ret;
+
+	amdgpu_connector_get_edid(connector);
+	ret = amdgpu_connector_ddc_get_modes(connector);
+
+	return ret;
+}
+
+static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	/* XXX check mode bandwidth */
+
+	if ((mode->clock / 10) > adev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_encoder *encoder;
+	const struct drm_encoder_helper_funcs *encoder_funcs;
+	bool dret = false;
+	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	encoder = amdgpu_connector_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+
+	if (amdgpu_connector->ddc_bus)
+		dret = amdgpu_ddc_probe(amdgpu_connector, false);
+	if (dret) {
+		amdgpu_connector->detected_by_load = false;
+		amdgpu_connector_free_edid(connector);
+		amdgpu_connector_get_edid(connector);
+
+		if (!amdgpu_connector->edid) {
+			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+					connector->name);
+			ret = connector_status_connected;
+		} else {
+			amdgpu_connector->use_digital =
+				!!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
+				amdgpu_connector_free_edid(connector);
+				ret = connector_status_disconnected;
+			} else {
+				ret = connector_status_connected;
+			}
+		}
+	} else {
+
+		/* if we aren't forcing don't do destructive polling */
+		if (!force) {
+			/* only return the previous status if we last
+			 * detected a monitor via load.
+			 */
+			if (amdgpu_connector->detected_by_load)
+				ret = connector->status;
+			goto out;
+		}
+
+		if (amdgpu_connector->dac_load_detect && encoder) {
+			encoder_funcs = encoder->helper_private;
+			ret = encoder_funcs->detect(encoder, connector);
+			if (ret != connector_status_disconnected)
+				amdgpu_connector->detected_by_load = true;
+		}
+	}
+
+	amdgpu_connector_update_scratch_regs(connector, ret);
+
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_vga_helper_funcs = {
+	.get_modes = amdgpu_connector_vga_get_modes,
+	.mode_valid = amdgpu_connector_vga_mode_valid,
+	.best_encoder = amdgpu_connector_best_single_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = amdgpu_connector_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = amdgpu_connector_destroy,
+	.set_property = amdgpu_connector_set_property,
+};
+
+static bool
+amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	enum drm_connector_status status;
+
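+	/* If this connector has an HPD pin, compare its current state with the
+	 * cached connector status; when they agree, nothing has changed and
+	 * the caller can skip a full re-detect.
+	 */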
+	if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE) {
+		if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+		if (connector->status == status)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * DVI is complicated.
+ * Do a DDC probe; if the DDC probe passes, get the full EDID so
+ * we can do analog/digital monitor detection at this point.
+ * If the monitor is an analog monitor, or we got no DDC,
+ * we need to find the DAC encoder object for this connector.
+ * If we got no DDC, we do load detection on the DAC encoder object.
+ * If we got analog DDC, or load detection passes on the DAC encoder,
+ * we have to check if this analog encoder is shared with anyone else (TV);
+ * if it's shared, we have to set the other connector to disconnected.
+ */
+static enum drm_connector_status
+amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_encoder *encoder = NULL;
+	const struct drm_encoder_helper_funcs *encoder_funcs;
+	int i, r;
+	enum drm_connector_status ret = connector_status_disconnected;
+	bool dret = false, broken_edid = false;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto exit;
+	}
+
+	if (amdgpu_connector->ddc_bus)
+		dret = amdgpu_ddc_probe(amdgpu_connector, false);
+	if (dret) {
+		amdgpu_connector->detected_by_load = false;
+		amdgpu_connector_free_edid(connector);
+		amdgpu_connector_get_edid(connector);
+
+		if (!amdgpu_connector->edid) {
+			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+					connector->name);
+			ret = connector_status_connected;
+			broken_edid = true; /* defer use_digital to later */
+		} else {
+			amdgpu_connector->use_digital =
+				!!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
+				amdgpu_connector_free_edid(connector);
+				ret = connector_status_disconnected;
+			} else {
+				ret = connector_status_connected;
+			}
+
+			/* This gets complicated.  We have boards with VGA + HDMI with a
+			 * shared DDC line and we have boards with DVI-D + HDMI with a shared
+			 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
+			 * you don't really know what's connected to which port as both are digital.
+			 */
+			if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
+				struct drm_connector *list_connector;
+				struct amdgpu_connector *list_amdgpu_connector;
+				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+					if (connector == list_connector)
+						continue;
+					list_amdgpu_connector = to_amdgpu_connector(list_connector);
+					if (list_amdgpu_connector->shared_ddc &&
+					    (list_amdgpu_connector->ddc_bus->rec.i2c_id ==
+					     amdgpu_connector->ddc_bus->rec.i2c_id)) {
+						/* cases where both connectors are digital */
+						if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+							/* hpd is our only option in this case */
+							if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+								amdgpu_connector_free_edid(connector);
+								ret = connector_status_disconnected;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if ((ret == connector_status_connected) && (amdgpu_connector->use_digital == true))
+		goto out;
+
+	/* DVI-D and HDMI-A are digital only */
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+		goto out;
+
+	/* if we aren't forcing don't do destructive polling */
+	if (!force) {
+		/* only return the previous status if we last
+		 * detected a monitor via load.
+		 */
+		if (amdgpu_connector->detected_by_load)
+			ret = connector->status;
+		goto out;
+	}
+
+	/* find analog encoder */
+	if (amdgpu_connector->dac_load_detect) {
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] == 0)
+				break;
+
+			encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+			if (!encoder)
+				continue;
+
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+			    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+				continue;
+
+			encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->detect) {
+				if (!broken_edid) {
+					if (ret != connector_status_connected) {
+						/* deal with analog monitors without DDC */
+						ret = encoder_funcs->detect(encoder, connector);
+						if (ret == connector_status_connected) {
+							amdgpu_connector->use_digital = false;
+						}
+						if (ret != connector_status_disconnected)
+							amdgpu_connector->detected_by_load = true;
+					}
+				} else {
+					enum drm_connector_status lret;
+					/* assume digital unless load detected otherwise */
+					amdgpu_connector->use_digital = true;
+					lret = encoder_funcs->detect(encoder, connector);
+					DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
+					if (lret == connector_status_connected)
+						amdgpu_connector->use_digital = false;
+				}
+				break;
+			}
+		}
+	}
+
+out:
+	/* updated in get modes as well since we need to know if it's analog or digital */
+	amdgpu_connector_update_scratch_regs(connector, ret);
+
+exit:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
+	return ret;
+}
+
+/* okay need to be smart in here about which encoder to pick */
+static struct drm_encoder *
+amdgpu_connector_dvi_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_encoder *encoder;
+	int i;
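+	/* Prefer a TMDS encoder when the sink was detected as digital and a
+	 * DAC (or TV DAC) when it was analog; fall back to the first encoder.
+	 */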
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		if (amdgpu_connector->use_digital == true) {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+				return encoder;
+		} else {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+			    encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+				return encoder;
+		}
+	}
+
+	/* see if we have a default encoder  TODO */
+
+	/* then check use digital */
+	/* pick the first one */
+	if (enc_id)
+		return drm_encoder_find(connector->dev, enc_id);
+	return NULL;
+}
+
+static void amdgpu_connector_dvi_force(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	if (connector->force == DRM_FORCE_ON)
+		amdgpu_connector->use_digital = false;
+	if (connector->force == DRM_FORCE_ON_DIGITAL)
+		amdgpu_connector->use_digital = true;
+}
+
+static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	/* XXX check mode bandwidth */
+
+	if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
+		if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+		    (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+		    (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
+			return MODE_OK;
+		} else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+			/* HDMI 1.3+ supports max clock of 340 MHz */
+			if (mode->clock > 340000)
+				return MODE_CLOCK_HIGH;
+			else
+				return MODE_OK;
+		} else {
+			return MODE_CLOCK_HIGH;
+		}
+	}
+
+	/* check against the max pixel clock */
+	if ((mode->clock / 10) > adev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_dvi_helper_funcs = {
+	.get_modes = amdgpu_connector_vga_get_modes,
+	.mode_valid = amdgpu_connector_dvi_mode_valid,
+	.best_encoder = amdgpu_connector_dvi_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = amdgpu_connector_dvi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = amdgpu_connector_set_property,
+	.destroy = amdgpu_connector_destroy,
+	.force = amdgpu_connector_dvi_force,
+};
+
+static int amdgpu_connector_dp_get_modes(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+	int ret;
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_display_mode *mode;
+
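+		/* eDP panels may need panel power enabled before the EDID can
+		 * be read, so toggle it around the probe if it is not already on.
+		 */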
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+			if (!amdgpu_dig_connector->edp_on)
+				amdgpu_atombios_encoder_set_edp_panel_power(connector,
+								     ATOM_TRANSMITTER_ACTION_POWER_ON);
+			amdgpu_connector_get_edid(connector);
+			ret = amdgpu_connector_ddc_get_modes(connector);
+			if (!amdgpu_dig_connector->edp_on)
+				amdgpu_atombios_encoder_set_edp_panel_power(connector,
+								     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+		} else {
+			/* need to setup ddc on the bridge */
+			if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			    ENCODER_OBJECT_ID_NONE) {
+				if (encoder)
+					amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
+			}
+			amdgpu_connector_get_edid(connector);
+			ret = amdgpu_connector_ddc_get_modes(connector);
+		}
+
+		if (ret > 0) {
+			if (encoder) {
+				amdgpu_connector_fixup_lcd_native_mode(encoder, connector);
+				/* add scaled modes */
+				amdgpu_connector_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+
+		if (!encoder)
+			return 0;
+
+		/* we have no EDID modes */
+		mode = amdgpu_connector_lcd_native_mode(encoder);
+		if (mode) {
+			ret = 1;
+			drm_mode_probed_add(connector, mode);
+			/* add the width/height from vbios tables if available */
+			connector->display_info.width_mm = mode->width_mm;
+			connector->display_info.height_mm = mode->height_mm;
+			/* add scaled modes */
+			amdgpu_connector_add_common_modes(encoder, connector);
+		}
+	} else {
+		/* need to setup ddc on the bridge */
+		if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			ENCODER_OBJECT_ID_NONE) {
+			if (encoder)
+				amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
+		}
+		amdgpu_connector_get_edid(connector);
+		ret = amdgpu_connector_ddc_get_modes(connector);
+
+		amdgpu_get_native_mode(connector);
+	}
+
+	return ret;
+}
+
+u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+	int i;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		encoder = drm_encoder_find(connector->dev,
+					connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+		switch (amdgpu_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return amdgpu_encoder->encoder_id;
+		default:
+			break;
+		}
+	}
+
+	return ENCODER_OBJECT_ID_NONE;
+}
+
+static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+	int i;
+	bool found = false;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+		encoder = drm_encoder_find(connector->dev,
+					connector->encoder_ids[i]);
+		if (!encoder)
+			continue;
+
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
+			found = true;
+	}
+
+	return found;
+}
+
+bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
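+	/* DP 1.2 (HBR2) is only enabled when the default display clock is high
+	 * enough and at least one encoder on this connector reports HBR2
+	 * support.
+	 */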
+	if ((adev->clock.default_dispclk >= 53900) &&
+	    amdgpu_connector_encoder_is_hbr2(connector)) {
+		return true;
+	}
+
+	return false;
+}
+
+static enum drm_connector_status
+amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
+	struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto out;
+	}
+
+	amdgpu_connector_free_edid(connector);
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		if (encoder) {
+			struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+			struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+			/* check if panel is valid */
+			if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+				ret = connector_status_connected;
+		}
+		/* eDP is always DP */
+		amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		if (!amdgpu_dig_connector->edp_on)
+			amdgpu_atombios_encoder_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+		if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+			ret = connector_status_connected;
+		if (!amdgpu_dig_connector->edp_on)
+			amdgpu_atombios_encoder_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+	} else if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+		   ENCODER_OBJECT_ID_NONE) {
+		/* DP bridges are always DP */
+		amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		/* get the DPCD from the bridge */
+		amdgpu_atombios_dp_get_dpcd(amdgpu_connector);
+
+		if (encoder) {
+			/* setup ddc on the bridge */
+			amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
+			/* bridge chips are always aux */
+			if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */
+				ret = connector_status_connected;
+			else if (amdgpu_connector->dac_load_detect) { /* try load detection */
+				const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+				ret = encoder_funcs->detect(encoder, connector);
+			}
+		}
+	} else {
+		amdgpu_dig_connector->dp_sink_type =
+			amdgpu_atombios_dp_get_sinktype(amdgpu_connector);
+		if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+			ret = connector_status_connected;
+			if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+				amdgpu_atombios_dp_get_dpcd(amdgpu_connector);
+		} else {
+			if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+				if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+					ret = connector_status_connected;
+			} else {
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+				if (amdgpu_ddc_probe(amdgpu_connector, false))
+					ret = connector_status_connected;
+			}
+		}
+	}
+
+	amdgpu_connector_update_scratch_regs(connector, ret);
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
+	return ret;
+}
+
+static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+					   struct drm_display_mode *mode)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+		if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+			return MODE_PANEL;
+
+		if (encoder) {
+			struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+			struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+			/* AVIVO hardware supports downscaling modes larger than the panel
+			 * to the panel size, but I'm not sure this is desirable.
+			 */
+			if ((mode->hdisplay > native_mode->hdisplay) ||
+			    (mode->vdisplay > native_mode->vdisplay))
+				return MODE_PANEL;
+
+			/* if scaling is disabled, block non-native modes */
+			if (amdgpu_encoder->rmx_type == RMX_OFF) {
+				if ((mode->hdisplay != native_mode->hdisplay) ||
+				    (mode->vdisplay != native_mode->vdisplay))
+					return MODE_PANEL;
+			}
+		}
+		return MODE_OK;
+	} else {
+		if ((amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+			return amdgpu_atombios_dp_mode_valid_helper(connector, mode);
+		} else {
+			if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+				/* HDMI 1.3+ supports max clock of 340 MHz */
+				if (mode->clock > 340000)
+					return MODE_CLOCK_HIGH;
+			} else {
+				if (mode->clock > 165000)
+					return MODE_CLOCK_HIGH;
+			}
+		}
+	}
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
+	.get_modes = amdgpu_connector_dp_get_modes,
+	.mode_valid = amdgpu_connector_dp_mode_valid,
+	.best_encoder = amdgpu_connector_dvi_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = amdgpu_connector_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = amdgpu_connector_set_property,
+	.destroy = amdgpu_connector_destroy,
+	.force = amdgpu_connector_dvi_force,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = amdgpu_connector_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = amdgpu_connector_set_lcd_property,
+	.destroy = amdgpu_connector_destroy,
+	.force = amdgpu_connector_dvi_force,
+};
+
+void
+amdgpu_connector_add(struct amdgpu_device *adev,
+		      uint32_t connector_id,
+		      uint32_t supported_device,
+		      int connector_type,
+		      struct amdgpu_i2c_bus_rec *i2c_bus,
+		      uint16_t connector_object_id,
+		      struct amdgpu_hpd *hpd,
+		      struct amdgpu_router *router)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+	struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+	uint32_t subpixel_order = SubPixelNone;
+	bool shared_ddc = false;
+	bool is_dp_bridge = false;
+	bool has_aux = false;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		amdgpu_connector = to_amdgpu_connector(connector);
+		if (amdgpu_connector->connector_id == connector_id) {
+			amdgpu_connector->devices |= supported_device;
+			return;
+		}
+		if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
+			if (amdgpu_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+				amdgpu_connector->shared_ddc = true;
+				shared_ddc = true;
+			}
+			if (amdgpu_connector->router_bus && router->ddc_valid &&
+			    (amdgpu_connector->router.router_id == router->router_id)) {
+				amdgpu_connector->shared_ddc = false;
+				shared_ddc = false;
+			}
+		}
+	}
+
+	/* check if it's a dp bridge */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		if (amdgpu_encoder->devices & supported_device) {
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_TRAVIS:
+			case ENCODER_OBJECT_ID_NUTMEG:
+				is_dp_bridge = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	amdgpu_connector = kzalloc(sizeof(struct amdgpu_connector), GFP_KERNEL);
+	if (!amdgpu_connector)
+		return;
+
+	connector = &amdgpu_connector->base;
+
+	amdgpu_connector->connector_id = connector_id;
+	amdgpu_connector->devices = supported_device;
+	amdgpu_connector->shared_ddc = shared_ddc;
+	amdgpu_connector->connector_object_id = connector_object_id;
+	amdgpu_connector->hpd = *hpd;
+
+	amdgpu_connector->router = *router;
+	if (router->ddc_valid || router->cd_valid) {
+		amdgpu_connector->router_bus = amdgpu_i2c_lookup(adev, &router->i2c_info);
+		if (!amdgpu_connector->router_bus)
+			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
+	}
+
+	if (is_dp_bridge) {
+		amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+		if (!amdgpu_dig_connector)
+			goto failed;
+		amdgpu_connector->con_priv = amdgpu_dig_connector;
+		if (i2c_bus->valid) {
+			amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+			if (amdgpu_connector->ddc_bus)
+				has_aux = true;
+			else
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+		default:
+			drm_connector_init(dev, &amdgpu_connector->base,
+					   &amdgpu_connector_dp_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base,
+						 &amdgpu_connector_dp_helper_funcs);
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			amdgpu_connector->dac_load_detect = true;
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			drm_connector_init(dev, &amdgpu_connector->base,
+					   &amdgpu_connector_dp_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base,
+						 &amdgpu_connector_dp_helper_funcs);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.underscan_property,
+						      UNDERSCAN_OFF);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.underscan_hborder_property,
+						      0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.underscan_vborder_property,
+						      0);
+
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.dither_property,
+						   AMDGPU_FMT_DITHER_DISABLE);
+
+			if (amdgpu_audio != 0)
+				drm_object_attach_property(&amdgpu_connector->base.base,
+							   adev->mode_info.audio_property,
+							   AMDGPU_AUDIO_AUTO);
+
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				amdgpu_connector->dac_load_detect = true;
+				drm_object_attach_property(&amdgpu_connector->base.base,
+							      adev->mode_info.load_detect_property,
+							      1);
+			}
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+		case DRM_MODE_CONNECTOR_eDP:
+			drm_connector_init(dev, &amdgpu_connector->base,
+					   &amdgpu_connector_edp_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base,
+						 &amdgpu_connector_dp_helper_funcs);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	} else {
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (!amdgpu_connector->ddc_bus)
+					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			amdgpu_connector->dac_load_detect = true;
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+			/* no HPD on analog connectors */
+			amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVIA:
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (!amdgpu_connector->ddc_bus)
+					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			amdgpu_connector->dac_load_detect = true;
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+			/* no HPD on analog connectors */
+			amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (!amdgpu_connector->ddc_bus)
+					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.coherent_mode_property,
+						      1);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_property,
+						   UNDERSCAN_OFF);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_hborder_property,
+						   0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_vborder_property,
+						   0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+
+			if (amdgpu_audio != 0) {
+				drm_object_attach_property(&amdgpu_connector->base.base,
+							   adev->mode_info.audio_property,
+							   AMDGPU_AUDIO_AUTO);
+			}
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.dither_property,
+						   AMDGPU_FMT_DITHER_DISABLE);
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				amdgpu_connector->dac_load_detect = true;
+				drm_object_attach_property(&amdgpu_connector->base.base,
+							   adev->mode_info.load_detect_property,
+							   1);
+			}
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (!amdgpu_connector->ddc_bus)
+					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.coherent_mode_property,
+						      1);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_property,
+						   UNDERSCAN_OFF);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_hborder_property,
+						   0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_vborder_property,
+						   0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+			if (amdgpu_audio != 0) {
+				drm_object_attach_property(&amdgpu_connector->base.base,
+							   adev->mode_info.audio_property,
+							   AMDGPU_AUDIO_AUTO);
+			}
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.dither_property,
+						   AMDGPU_FMT_DITHER_DISABLE);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (amdgpu_connector->ddc_bus)
+					has_aux = true;
+				else
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      adev->mode_info.coherent_mode_property,
+						      1);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_property,
+						   UNDERSCAN_OFF);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_hborder_property,
+						   0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.underscan_vborder_property,
+						   0);
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   dev->mode_config.scaling_mode_property,
+						   DRM_MODE_SCALE_NONE);
+			if (amdgpu_audio != 0) {
+				drm_object_attach_property(&amdgpu_connector->base.base,
+							   adev->mode_info.audio_property,
+							   AMDGPU_AUDIO_AUTO);
+			}
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						   adev->mode_info.dither_property,
+						   AMDGPU_FMT_DITHER_DISABLE);
+			connector->interlace_allowed = true;
+			/* in theory with a DP to VGA converter... */
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_eDP:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (amdgpu_connector->ddc_bus)
+					has_aux = true;
+				else
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+			if (!amdgpu_dig_connector)
+				goto failed;
+			amdgpu_connector->con_priv = amdgpu_dig_connector;
+			drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type);
+			drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
+			if (i2c_bus->valid) {
+				amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
+				if (!amdgpu_connector->ddc_bus)
+					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&amdgpu_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	}
+
+	if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	connector->display_info.subpixel_order = subpixel_order;
+	drm_connector_register(connector);
+
+	if (has_aux)
+		amdgpu_atombios_dp_aux_init(amdgpu_connector);
+
+	return;
+
+failed:
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}

+ 42 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h

@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_CONNECTORS_H__
+#define __AMDGPU_CONNECTORS_H__
+
+struct edid *amdgpu_connector_edid(struct drm_connector *connector);
+void amdgpu_connector_hotplug(struct drm_connector *connector);
+int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector);
+u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector);
+void
+amdgpu_connector_add(struct amdgpu_device *adev,
+		      uint32_t connector_id,
+		      uint32_t supported_device,
+		      int connector_type,
+		      struct amdgpu_i2c_bus_rec *i2c_bus,
+		      uint16_t connector_object_id,
+		      struct amdgpu_hpd *hpd,
+		      struct amdgpu_router *router);
+
+#endif

+ 784 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -0,0 +1,784 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <linux/list_sort.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+#define AMDGPU_CS_MAX_PRIORITY		32u
+#define AMDGPU_CS_NUM_BUCKETS		(AMDGPU_CS_MAX_PRIORITY + 1)
+
+/* This is based on the bucket sort with O(n) time complexity.
+ * An item with priority "i" is added to bucket[i]. The lists are then
+ * concatenated in descending order.
+ */
+struct amdgpu_cs_buckets {
+	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
+};
+
+static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
+{
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
+		INIT_LIST_HEAD(&b->bucket[i]);
+}
+
+static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
+				  struct list_head *item, unsigned priority)
+{
+	/* Since buffers which appear sooner in the relocation list are
+	 * likely to be used more often than buffers which appear later
+	 * in the list, the sort mustn't change the ordering of buffers
+	 * with the same priority, i.e. it must be stable.
+	 */
+	list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
+}
+
+static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
+				       struct list_head *out_list)
+{
+	unsigned i;
+
+	/* Connect the sorted buckets in the output list. */
+	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
+		list_splice(&b->bucket[i], out_list);
+	}
+}
+
+int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
+		       u32 ip_instance, u32 ring,
+		       struct amdgpu_ring **out_ring)
+{
+	/* Right now all IPs have only one instance - multiple rings. */
+	if (ip_instance != 0) {
+		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
+		return -EINVAL;
+	}
+
+	switch (ip_type) {
+	default:
+		DRM_ERROR("unknown ip type: %d\n", ip_type);
+		return -EINVAL;
+	case AMDGPU_HW_IP_GFX:
+		if (ring < adev->gfx.num_gfx_rings) {
+			*out_ring = &adev->gfx.gfx_ring[ring];
+		} else {
+			DRM_ERROR("only %d gfx rings are supported now\n",
+				  adev->gfx.num_gfx_rings);
+			return -EINVAL;
+		}
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		if (ring < adev->gfx.num_compute_rings) {
+			*out_ring = &adev->gfx.compute_ring[ring];
+		} else {
+			DRM_ERROR("only %d compute rings are supported now\n",
+				  adev->gfx.num_compute_rings);
+			return -EINVAL;
+		}
+		break;
+	case AMDGPU_HW_IP_DMA:
+		if (ring < 2) {
+			*out_ring = &adev->sdma[ring].ring;
+		} else {
+			DRM_ERROR("only two SDMA rings are supported\n");
+			return -EINVAL;
+		}
+		break;
+	case AMDGPU_HW_IP_UVD:
+		*out_ring = &adev->uvd.ring;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		if (ring < 2) {
+			*out_ring = &adev->vce.ring[ring];
+		} else {
+			DRM_ERROR("only two VCE rings are supported\n");
+			return -EINVAL;
+		}
+		break;
+	}
+	return 0;
+}
+
+int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+{
+	union drm_amdgpu_cs *cs = data;
+	uint64_t *chunk_array_user;
+	uint64_t *chunk_array = NULL;
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	unsigned size, i;
+	int r = 0;
+
+	if (!cs->in.num_chunks)
+		goto out;
+
+	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
+	if (!p->ctx) {
+		r = -EINVAL;
+		goto out;
+	}
+	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+
+	/* get chunks */
+	INIT_LIST_HEAD(&p->validated);
+	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (chunk_array == NULL) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+	if (copy_from_user(chunk_array, chunk_array_user,
+			   sizeof(uint64_t)*cs->in.num_chunks)) {
+		r = -EFAULT;
+		goto out;
+	}
+
+	p->nchunks = cs->in.num_chunks;
+	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+			    GFP_KERNEL);
+	if (p->chunks == NULL) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < p->nchunks; i++) {
+		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
+		struct drm_amdgpu_cs_chunk user_chunk;
+		uint32_t __user *cdata;
+
+		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		if (copy_from_user(&user_chunk, chunk_ptr,
+				       sizeof(struct drm_amdgpu_cs_chunk))) {
+			r = -EFAULT;
+			goto out;
+		}
+		p->chunks[i].chunk_id = user_chunk.chunk_id;
+		p->chunks[i].length_dw = user_chunk.length_dw;
+		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
+			p->num_ibs++;
+
+		size = p->chunks[i].length_dw;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		p->chunks[i].user_ptr = cdata;
+
+		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+		if (p->chunks[i].kdata == NULL) {
+			r = -ENOMEM;
+			goto out;
+		}
+		size *= sizeof(uint32_t);
+		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) {
+			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
+			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
+				uint32_t handle;
+				struct drm_gem_object *gobj;
+				struct drm_amdgpu_cs_chunk_fence *fence_data;
+
+				fence_data = (void *)p->chunks[i].kdata;
+				handle = fence_data->handle;
+				gobj = drm_gem_object_lookup(p->adev->ddev,
+							     p->filp, handle);
+				if (gobj == NULL) {
+					r = -EINVAL;
+					goto out;
+				}
+
+				p->uf.bo = gem_to_amdgpu_bo(gobj);
+				p->uf.offset = fence_data->offset;
+			} else {
+				r = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!p->ibs) {
+		r = -ENOMEM;
+		goto out;
+	}
+
+out:
+	kfree(chunk_array);
+	return r;
+}
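/*
 * Not part of the patch -- an illustrative userspace sketch of the chunk
 * layout that amdgpu_cs_parser_init() above expects: cs.in.chunks points to
 * an array of u64 pointers, each of which points to a drm_amdgpu_cs_chunk.
 * The DRM_IOCTL_AMDGPU_CS macro and the header path are assumptions taken
 * from the new amdgpu_drm.h uapi; error handling is trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int submit_one_gfx_ib(int drm_fd, uint32_t ctx_id, uint32_t bo_list,
			     uint64_t ib_va, uint32_t ib_bytes, uint64_t *seq)
{
	struct drm_amdgpu_cs_chunk_ib ib_info = {
		.ip_type = AMDGPU_HW_IP_GFX,	/* one IB on the first GFX ring */
		.ip_instance = 0,
		.ring = 0,
		.va_start = ib_va,		/* GPU VA of the IB in the ctx VM */
		.ib_bytes = ib_bytes,
	};
	struct drm_amdgpu_cs_chunk chunk = {
		.chunk_id = AMDGPU_CHUNK_ID_IB,
		.length_dw = sizeof(ib_info) / 4,
		.chunk_data = (uintptr_t)&ib_info,
	};
	uint64_t chunk_ptr = (uintptr_t)&chunk;	/* array of one chunk pointer */
	union drm_amdgpu_cs cs;
	int r;

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;			/* from the context alloc ioctl */
	cs.in.bo_list_handle = bo_list;
	cs.in.num_chunks = 1;
	cs.in.chunks = (uintptr_t)&chunk_ptr;

	r = ioctl(drm_fd, DRM_IOCTL_AMDGPU_CS, &cs);
	if (r == 0 && seq)
		*seq = cs.out.handle;		/* fence seq, usable with WAIT_CS */
	return r;
}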
+
+/* Returns how many bytes TTM can move per IB.
+ */
+static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
+{
+	u64 real_vram_size = adev->mc.real_vram_size;
+	u64 vram_usage = atomic64_read(&adev->vram_usage);
+
+	/* This function is based on the current VRAM usage.
+	 *
+	 * - If all of VRAM is free, allow relocating the number of bytes that
+	 *   is equal to 1/4 of the size of VRAM for this IB.
+	 *
+	 * - If more than one half of VRAM is occupied, only allow relocating
+	 *   1 MB of data for this IB.
+	 *
+	 * - From 0 to one half of used VRAM, the threshold decreases
+	 *   linearly.
+	 *         __________________
+	 * 1/4 of -|\               |
+	 * VRAM    | \              |
+	 *         |  \             |
+	 *         |   \            |
+	 *         |    \           |
+	 *         |     \          |
+	 *         |      \         |
+	 *         |       \________|1 MB
+	 *         |----------------|
+	 *    VRAM 0 %             100 %
+	 *         used            used
+	 *
+	 * Note: It's a threshold, not a limit. The threshold must be crossed
+	 * for buffer relocations to stop, so any buffer of an arbitrary size
+	 * can be moved as long as the threshold isn't crossed before
+	 * the relocation takes place. We don't want to disable buffer
+	 * relocations completely.
+	 *
+	 * The idea is that buffers should be placed in VRAM at creation time
+	 * and TTM should only do a minimum number of relocations during
+	 * command submission. In practice, you need to submit at least
+	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
+	 *
+	 * Also, things can get pretty crazy under memory pressure and actual
+	 * VRAM usage can change a lot, so playing safe even at 50% does
+	 * consistently increase performance.
+	 */
+
+	u64 half_vram = real_vram_size >> 1;
+	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
+	u64 bytes_moved_threshold = half_free_vram >> 1;
+	return max(bytes_moved_threshold, 1024*1024ull);
+}
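/*
 * Not part of the patch -- a worked example of the threshold above with
 * made-up numbers: on a board with 4 GB of real VRAM and 1 GB currently in
 * use, half_vram = 2 GB and half_free_vram = 2 GB - 1 GB = 1 GB, so this IB
 * may trigger up to 512 MB of moves.  Once usage reaches 2 GB (half of VRAM)
 * the threshold bottoms out at the 1 MB floor from the max() above.
 */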
+
+int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+{
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
+	struct amdgpu_device *adev = p->adev;
+	struct amdgpu_bo_list_entry *lobj;
+	struct list_head duplicates;
+	struct amdgpu_bo *bo;
+	u64 bytes_moved = 0, initial_bytes_moved;
+	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
+	int r;
+
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+
+	list_for_each_entry(lobj, &p->validated, tv.head) {
+		bo = lobj->robj;
+		if (!bo->pin_count) {
+			u32 domain = lobj->prefered_domains;
+			u32 current_domain =
+				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+			/* Check if this buffer will be moved and don't move it
+			 * if we have moved too many buffers for this IB already.
+			 *
+			 * Note that this allows moving at least one buffer of
+			 * any size, because it doesn't take the current "bo"
+			 * into account. We don't want to disallow buffer moves
+			 * completely.
+			 */
+			if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+			    (domain & current_domain) == 0 && /* will be moved */
+			    bytes_moved > bytes_moved_threshold) {
+				/* don't move it */
+				domain = current_domain;
+			}
+
+		retry:
+			amdgpu_ttm_placement_from_domain(bo, domain);
+			initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+			bytes_moved += atomic64_read(&adev->num_bytes_moved) -
+				       initial_bytes_moved;
+
+			if (unlikely(r)) {
+				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
+					domain = lobj->allowed_domains;
+					goto retry;
+				}
+				ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+				return r;
+			}
+		}
+		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
+	}
+	return 0;
+}
+
+static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
+{
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_cs_buckets buckets;
+	bool need_mmap_lock = false;
+	int i, r;
+
+	if (p->bo_list) {
+		need_mmap_lock = p->bo_list->has_userptr;
+		amdgpu_cs_buckets_init(&buckets);
+		for (i = 0; i < p->bo_list->num_entries; i++)
+			amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
+								  p->bo_list->array[i].priority);
+
+		amdgpu_cs_buckets_get_list(&buckets, &p->validated);
+	}
+
+	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
+				      &p->validated);
+
+	if (need_mmap_lock)
+		down_read(&current->mm->mmap_sem);
+
+	r = amdgpu_cs_list_validate(p);
+
+	if (need_mmap_lock)
+		up_read(&current->mm->mmap_sem);
+
+	return r;
+}
+
+static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
+{
+	struct amdgpu_bo_list_entry *e;
+	int r;
+
+	list_for_each_entry(e, &p->validated, tv.head) {
+		struct reservation_object *resv = e->robj->tbo.resv;
+		r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);
+
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
+static int cmp_size_smaller_first(void *priv, struct list_head *a,
+				  struct list_head *b)
+{
+	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
+	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
+
+	/* Sort A before B if A is smaller. */
+	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, unreserve the buffers; otherwise just free the memory
+ * used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+	unsigned i;
+
+	if (!error) {
+		/* Sort the buffer list from the smallest to largest buffer,
+		 * which affects the order of buffers in the LRU list.
+		 * This assures that the smallest buffers are added first
+		 * to the LRU list, so they are likely to be later evicted
+		 * first, instead of large buffers whose eviction is more
+		 * expensive.
+		 *
+		 * This slightly lowers the number of bytes moved by TTM
+		 * per frame under memory pressure.
+		 */
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+				&parser->validated,
+				&parser->ibs[parser->num_ibs-1].fence->base);
+	} else if (backoff) {
+		ttm_eu_backoff_reservation(&parser->ticket,
+					   &parser->validated);
+	}
+
+	if (parser->ctx)
+		amdgpu_ctx_put(parser->ctx);
+	if (parser->bo_list)
+		amdgpu_bo_list_put(parser->bo_list);
+	drm_free_large(parser->vm_bos);
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
+	kfree(parser->chunks);
+	for (i = 0; i < parser->num_ibs; i++)
+		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+	kfree(parser->ibs);
+	if (parser->uf.bo)
+		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+}
+
+static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
+				   struct amdgpu_vm *vm)
+{
+	struct amdgpu_device *adev = p->adev;
+	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_bo *bo;
+	int i, r;
+
+	r = amdgpu_vm_update_page_directory(adev, vm);
+	if (r)
+		return r;
+
+	r = amdgpu_vm_clear_freed(adev, vm);
+	if (r)
+		return r;
+
+	if (p->bo_list) {
+		for (i = 0; i < p->bo_list->num_entries; i++) {
+			/* ignore duplicates */
+			bo = p->bo_list->array[i].robj;
+			if (!bo)
+				continue;
+
+			bo_va = p->bo_list->array[i].bo_va;
+			if (bo_va == NULL)
+				continue;
+
+			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+			if (r)
+				return r;
+
+			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+		}
+	}
+
+	return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+}
+
+static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+				 struct amdgpu_cs_parser *parser)
+{
+	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
+	struct amdgpu_ring *ring;
+	int i, r;
+
+	if (parser->num_ibs == 0)
+		return 0;
+
+	/* Only for UVD/VCE VM emulation */
+	for (i = 0; i < parser->num_ibs; i++) {
+		ring = parser->ibs[i].ring;
+		if (ring->funcs->parse_cs) {
+			r = amdgpu_ring_parse_cs(ring, parser, i);
+			if (r)
+				return r;
+		}
+	}
+
+	mutex_lock(&vm->mutex);
+	r = amdgpu_bo_vm_update_pte(parser, vm);
+	if (r) {
+		goto out;
+	}
+	amdgpu_cs_sync_rings(parser);
+
+	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+			       parser->filp);
+
+out:
+	mutex_unlock(&vm->mutex);
+	return r;
+}
+
+static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
+{
+	if (r == -EDEADLK) {
+		r = amdgpu_gpu_reset(adev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
+static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
+			     struct amdgpu_cs_parser *parser)
+{
+	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
+	int i, j;
+	int r;
+
+	for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
+		struct amdgpu_cs_chunk *chunk;
+		struct amdgpu_ib *ib;
+		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+		struct amdgpu_ring *ring;
+
+		chunk = &parser->chunks[i];
+		ib = &parser->ibs[j];
+		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
+
+		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+			continue;
+
+		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
+				       chunk_ib->ip_instance, chunk_ib->ring,
+				       &ring);
+		if (r)
+			return r;
+
+		if (ring->funcs->parse_cs) {
+			struct amdgpu_bo *aobj = NULL;
+			void *kptr;
+
+			amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj);
+			if (!aobj) {
+				DRM_ERROR("IB va_start is invalid\n");
+				return -EINVAL;
+			}
+
+			/* the IB should be reserved at this point */
+			r = amdgpu_bo_kmap(aobj, &kptr);
+			if (r) {
+				return r;
+			}
+
+			r =  amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
+			if (r) {
+				DRM_ERROR("Failed to get ib !\n");
+				return r;
+			}
+
+			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+			amdgpu_bo_kunmap(aobj);
+		} else {
+			r =  amdgpu_ib_get(ring, vm, 0, ib);
+			if (r) {
+				DRM_ERROR("Failed to get ib !\n");
+				return r;
+			}
+
+			ib->gpu_addr = chunk_ib->va_start;
+		}
+
+		ib->length_dw = chunk_ib->ib_bytes / 4;
+		ib->flags = chunk_ib->flags;
+		ib->ctx = parser->ctx;
+		j++;
+	}
+
+	if (!parser->num_ibs)
+		return 0;
+
+	/* add GDS resources to first IB */
+	if (parser->bo_list) {
+		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
+		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
+		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
+		struct amdgpu_ib *ib = &parser->ibs[0];
+
+		if (gds) {
+			ib->gds_base = amdgpu_bo_gpu_offset(gds);
+			ib->gds_size = amdgpu_bo_size(gds);
+		}
+		if (gws) {
+			ib->gws_base = amdgpu_bo_gpu_offset(gws);
+			ib->gws_size = amdgpu_bo_size(gws);
+		}
+		if (oa) {
+			ib->oa_base = amdgpu_bo_gpu_offset(oa);
+			ib->oa_size = amdgpu_bo_size(oa);
+		}
+	}
+
+	/* wrap the last IB with user fence */
+	if (parser->uf.bo) {
+		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
+
+		/* UVD & VCE fw doesn't support user fences */
+		if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
+		    ib->ring->type == AMDGPU_RING_TYPE_VCE)
+			return -EINVAL;
+
+		ib->user = &parser->uf;
+	}
+
+	return 0;
+}
+
+int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_cs *cs = data;
+	struct amdgpu_cs_parser parser;
+	int r, i;
+	bool reserved_buffers = false;
+
+	down_read(&adev->exclusive_lock);
+	if (!adev->accel_working) {
+		up_read(&adev->exclusive_lock);
+		return -EBUSY;
+	}
+	/* initialize parser */
+	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
+	parser.filp = filp;
+	parser.adev = adev;
+	r = amdgpu_cs_parser_init(&parser, data);
+	if (r) {
+		DRM_ERROR("Failed to initialize parser !\n");
+		amdgpu_cs_parser_fini(&parser, r, false);
+		up_read(&adev->exclusive_lock);
+		r = amdgpu_cs_handle_lockup(adev, r);
+		return r;
+	}
+
+	r = amdgpu_cs_parser_relocs(&parser);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (r == -ENOMEM)
+				DRM_ERROR("Not enough memory for command submission!\n");
+			else
+				DRM_ERROR("Failed to process the buffer list %d!\n", r);
+		}
+	} else {
+		reserved_buffers = true;
+		r = amdgpu_cs_ib_fill(adev, &parser);
+	}
+
+	if (r) {
+		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+		up_read(&adev->exclusive_lock);
+		r = amdgpu_cs_handle_lockup(adev, r);
+		return r;
+	}
+
+	for (i = 0; i < parser.num_ibs; i++)
+		trace_amdgpu_cs(&parser, i);
+
+	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
+	if (r) {
+		goto out;
+	}
+
+	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+out:
+	amdgpu_cs_parser_fini(&parser, r, true);
+	up_read(&adev->exclusive_lock);
+	r = amdgpu_cs_handle_lockup(adev, r);
+	return r;
+}
+
+/**
+ * amdgpu_cs_wait_ioctl - wait for a command submission to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ *
+ * Wait for the command submission identified by handle to finish.
+ */
+int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *filp)
+{
+	union drm_amdgpu_wait_cs *wait = data;
+	struct amdgpu_device *adev = dev->dev_private;
+	uint64_t seq[AMDGPU_MAX_RINGS] = {0};
+	struct amdgpu_ring *ring = NULL;
+	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+	struct amdgpu_ctx *ctx;
+	long r;
+
+	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
+	if (ctx == NULL)
+		return -EINVAL;
+
+	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
+			       wait->in.ring, &ring);
+	if (r)
+		return r;
+
+	seq[ring->idx] = wait->in.handle;
+
+	r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+	amdgpu_ctx_put(ctx);
+	if (r < 0)
+		return r;
+
+	memset(wait, 0, sizeof(*wait));
+	wait->out.status = (r == 0);
+
+	return 0;
+}
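/*
 * Not part of the patch -- userspace counterpart to the submission sketch
 * earlier in this file: wait on the fence sequence number returned in
 * cs.out.handle.  DRM_IOCTL_AMDGPU_WAIT_CS and the nanosecond timeout
 * encoding are assumptions taken from the new amdgpu_drm.h uapi.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int wait_one_cs(int drm_fd, uint32_t ctx_id, uint64_t seq,
		       uint64_t timeout_ns)
{
	union drm_amdgpu_wait_cs wait;
	int r;

	memset(&wait, 0, sizeof(wait));
	wait.in.handle = seq;			/* value of cs.out.handle */
	wait.in.ip_type = AMDGPU_HW_IP_GFX;	/* same ring the IB was sent to */
	wait.in.ip_instance = 0;
	wait.in.ring = 0;
	wait.in.ctx_id = ctx_id;
	wait.in.timeout = timeout_ns;

	r = ioctl(drm_fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait);
	if (r)
		return r;
	/* out.status is non-zero when the wait timed out, zero when idle */
	return wait.out.status ? -1 : 0;
}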
+
+/**
+ * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address
+ *
+ * @parser: command submission parser context
+ * @addr: VM address
+ * @bo: resulting BO of the mapping found
+ *
+ * Search the buffer objects in the command submission context for a certain
+ * virtual memory address. Returns allocation structure when found, NULL
+ * otherwise.
+ */
+struct amdgpu_bo_va_mapping *
+amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+		       uint64_t addr, struct amdgpu_bo **bo)
+{
+	struct amdgpu_bo_list_entry *reloc;
+	struct amdgpu_bo_va_mapping *mapping;
+
+	addr /= AMDGPU_GPU_PAGE_SIZE;
+
+	list_for_each_entry(reloc, &parser->validated, tv.head) {
+		if (!reloc->bo_va)
+			continue;
+
+		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+			if (mapping->it.start > addr ||
+			    addr > mapping->it.last)
+				continue;
+
+			*bo = reloc->bo_va->bo;
+			return mapping;
+		}
+	}
+
+	return NULL;
+}

+ 193 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: monk liu <monk.liu@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static void amdgpu_ctx_do_release(struct kref *ref)
+{
+	struct amdgpu_ctx *ctx;
+	struct amdgpu_ctx_mgr *mgr;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+	mgr = &ctx->fpriv->ctx_mgr;
+
+	idr_remove(&mgr->ctx_handles, ctx->id);
+	kfree(ctx);
+}
+
+int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+{
+	int r;
+	struct amdgpu_ctx *ctx;
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+
+	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mutex_lock(&mgr->lock);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+	if (r < 0) {
+		mutex_unlock(&mgr->lock);
+		kfree(ctx);
+		return r;
+	}
+	*id = (uint32_t)r;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->id = *id;
+	ctx->fpriv = fpriv;
+	kref_init(&ctx->refcount);
+	mutex_unlock(&mgr->lock);
+
+	return 0;
+}
+
+int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+{
+	struct amdgpu_ctx *ctx;
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+
+	mutex_lock(&mgr->lock);
+	ctx = idr_find(&mgr->ctx_handles, id);
+	if (ctx) {
+		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
+		mutex_unlock(&mgr->lock);
+		return 0;
+	}
+	mutex_unlock(&mgr->lock);
+	return -EINVAL;
+}
+
+static int amdgpu_ctx_query(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv, uint32_t id,
+			    union drm_amdgpu_ctx_out *out)
+{
+	struct amdgpu_ctx *ctx;
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	unsigned reset_counter;
+
+	mutex_lock(&mgr->lock);
+	ctx = idr_find(&mgr->ctx_handles, id);
+	if (!ctx) {
+		mutex_unlock(&mgr->lock);
+		return -EINVAL;
+	}
+
+	/* TODO: these two are always zero */
+	out->state.flags = ctx->state.flags;
+	out->state.hangs = ctx->state.hangs;
+
+	/* determine if a GPU reset has occurred since the last call */
+	reset_counter = atomic_read(&adev->gpu_reset_counter);
+	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
+	if (ctx->reset_counter == reset_counter)
+		out->state.reset_status = AMDGPU_CTX_NO_RESET;
+	else
+		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
+	ctx->reset_counter = reset_counter;
+
+	mutex_unlock(&mgr->lock);
+	return 0;
+}
+
+void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
+{
+	struct idr *idp;
+	struct amdgpu_ctx *ctx;
+	uint32_t id;
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+			DRM_ERROR("ctx (id=%u) is still alive\n", ctx->id);
+	}
+
+	mutex_destroy(&mgr->lock);
+}
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *filp)
+{
+	int r;
+	uint32_t id;
+	uint32_t flags;
+
+	union drm_amdgpu_ctx *args = data;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+	r = 0;
+	id = args->in.ctx_id;
+	flags = args->in.flags;
+
+	switch (args->in.op) {
+		case AMDGPU_CTX_OP_ALLOC_CTX:
+			r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+			args->out.alloc.ctx_id = id;
+			break;
+		case AMDGPU_CTX_OP_FREE_CTX:
+			r = amdgpu_ctx_free(adev, fpriv, id);
+			break;
+		case AMDGPU_CTX_OP_QUERY_STATE:
+			r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	return r;
+}
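/*
 * Not part of the patch -- an illustrative userspace use of the context
 * ioctl handled above: allocate a context, use its id for command
 * submission, then free it.  DRM_IOCTL_AMDGPU_CTX is an assumption taken
 * from the new amdgpu_drm.h uapi; error handling is trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_ctx_alloc(int drm_fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	r = ioctl(drm_fd, DRM_IOCTL_AMDGPU_CTX, &args);
	if (r == 0)
		*ctx_id = args.out.alloc.ctx_id;	/* handle used by the CS ioctl */
	return r;
}

static int example_ctx_free(int drm_fd, uint32_t ctx_id)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = ctx_id;
	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_CTX, &args);
}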
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
+	struct amdgpu_ctx *ctx;
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+
+	mutex_lock(&mgr->lock);
+	ctx = idr_find(&mgr->ctx_handles, id);
+	if (ctx)
+		kref_get(&ctx->refcount);
+	mutex_unlock(&mgr->lock);
+	return ctx;
+}
+
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
+{
+	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_ctx_mgr *mgr;
+
+	if (ctx == NULL)
+		return -EINVAL;
+
+	fpriv = ctx->fpriv;
+	mgr = &fpriv->ctx_mgr;
+	mutex_lock(&mgr->lock);
+	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
+	mutex_unlock(&mgr->lock);
+
+	return 0;
+}

+ 2003 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -0,0 +1,2003 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/console.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/efi.h>
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "cik.h"
+#endif
+#include "vi.h"
+#include "bif/bif_4_1_d.h"
+
+static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+
+static const char *amdgpu_asic_name[] = {
+	"BONAIRE",
+	"KAVERI",
+	"KABINI",
+	"HAWAII",
+	"MULLINS",
+	"TOPAZ",
+	"TONGA",
+	"CARRIZO",
+	"LAST",
+};
+
+bool amdgpu_device_is_px(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (adev->flags & AMDGPU_IS_PX)
+		return true;
+	return false;
+}
+
+/*
+ * MMIO register access helper functions.
+ */
+uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+			bool always_indirect)
+{
+	if ((reg * 4) < adev->rmmio_size && !always_indirect)
+		return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+	else {
+		unsigned long flags;
+		uint32_t ret;
+
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+		return ret;
+	}
+}
+
+void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    bool always_indirect)
+{
+	if ((reg * 4) < adev->rmmio_size && !always_indirect)
+		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+	}
+}
+
+u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	if ((reg * 4) < adev->rio_mem_size)
+		return ioread32(adev->rio_mem + (reg * 4));
+	else {
+		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
+		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
+	}
+}
+
+void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+
+	if ((reg * 4) < adev->rio_mem_size)
+		iowrite32(v, adev->rio_mem + (reg * 4));
+	else {
+		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
+		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
+	}
+}
+
+/**
+ * amdgpu_mm_rdoorbell - read a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
+{
+	if (index < adev->doorbell.num_doorbells) {
+		return readl(adev->doorbell.ptr + index);
+	} else {
+		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+		return 0;
+	}
+}
+
+/**
+ * amdgpu_mm_wdoorbell - write a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
+{
+	if (index < adev->doorbell.num_doorbells) {
+		writel(v, adev->doorbell.ptr + index);
+	} else {
+		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+	}
+}
+
+/**
+ * amdgpu_invalid_rreg - dummy reg read function
+ *
+ * @adev: amdgpu device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+	BUG();
+	return 0;
+}
+
+/**
+ * amdgpu_invalid_wreg - dummy reg write function
+ *
+ * @adev: amdgpu device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+		  reg, v);
+	BUG();
+}
+
+/**
+ * amdgpu_block_invalid_rreg - dummy reg read function
+ *
+ * @adev: amdgpu device pointer
+ * @block: offset of instance
+ * @reg: offset of register
+ *
+ * Dummy register read function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
+					  uint32_t block, uint32_t reg)
+{
+	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
+		  reg, block);
+	BUG();
+	return 0;
+}
+
+/**
+ * amdgpu_block_invalid_wreg - dummy reg write function
+ *
+ * @adev: amdgpu device pointer
+ * @block: offset of instance
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy block register write function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
+				      uint32_t block,
+				      uint32_t reg, uint32_t v)
+{
+	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+		  reg, block, v);
+	BUG();
+}
+
+static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->vram_scratch.robj == NULL) {
+		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     NULL, &adev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin(adev->vram_scratch.robj,
+			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(adev->vram_scratch.robj);
+		return r;
+	}
+	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
+				(void **)&adev->vram_scratch.ptr);
+	if (r)
+		amdgpu_bo_unpin(adev->vram_scratch.robj);
+	amdgpu_bo_unreserve(adev->vram_scratch.robj);
+
+	return r;
+}
+
+static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		amdgpu_bo_kunmap(adev->vram_scratch.robj);
+		amdgpu_bo_unpin(adev->vram_scratch.robj);
+		amdgpu_bo_unreserve(adev->vram_scratch.robj);
+	}
+	amdgpu_bo_unref(&adev->vram_scratch.robj);
+}
+
+/**
+ * amdgpu_program_register_sequence - program an array of registers.
+ *
+ * @adev: amdgpu_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+				      const u32 *registers,
+				      const u32 array_size)
+{
+	u32 tmp, reg, and_mask, or_mask;
+	int i;
+
+	if (array_size % 3)
+		return;
+
+	for (i = 0; i < array_size; i += 3) {
+		reg = registers[i + 0];
+		and_mask = registers[i + 1];
+		or_mask = registers[i + 2];
+
+		if (and_mask == 0xffffffff) {
+			tmp = or_mask;
+		} else {
+			tmp = RREG32(reg);
+			tmp &= ~and_mask;
+			tmp |= or_mask;
+		}
+		WREG32(reg, tmp);
+	}
+}
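/*
 * Not part of the patch -- an illustrative (made-up) register array for the
 * helper above.  Golden settings are flat { offset, and_mask, or_mask }
 * triples, so array_size must be a multiple of three; an and_mask of
 * 0xffffffff writes or_mask directly, anything else does a read-modify-write
 * that clears the and_mask bits before OR-ing in or_mask.  The offsets below
 * are placeholders, not real golden values for any asic.
 */
static const u32 example_golden_settings[] =
{
	0x3100, 0xffffffff, 0x00000008,	/* hypothetical: full register replace */
	0x3200, 0x0000ff00, 0x00002100,	/* hypothetical: clear bits 15:8, then set 0x2100 */
};

/* amdgpu_program_register_sequence(adev, example_golden_settings,
 *				    (const u32)ARRAY_SIZE(example_golden_settings)); */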
+
+void amdgpu_pci_config_reset(struct amdgpu_device *adev)
+{
+	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
+}
+
+/*
+ * GPU doorbell aperture helpers function.
+ */
+/**
+ * amdgpu_doorbell_init - Init doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Init doorbell driver information (CIK)
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+{
+	/* doorbell bar mapping */
+	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
+	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
+
+	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), 
+					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+	if (adev->doorbell.num_doorbells == 0)
+		return -EINVAL;
+
+	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
+	if (adev->doorbell.ptr == NULL) {
+		return -ENOMEM;
+	}
+	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
+	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
+
+	return 0;
+}
+
+/**
+ * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down doorbell driver information (CIK)
+ */
+static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+{
+	iounmap(adev->doorbell.ptr);
+	adev->doorbell.ptr = NULL;
+}
+
+/**
+ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
+ *                                setup amdkfd
+ *
+ * @adev: amdgpu_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
+ *
+ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
+ * takes doorbells required for its own rings and reports the setup to amdkfd.
+ * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ */
+void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+				phys_addr_t *aperture_base,
+				size_t *aperture_size,
+				size_t *start_offset)
+{
+	/*
+	 * The first num_doorbells are used by amdgpu.
+	 * amdkfd takes whatever's left in the aperture.
+	 */
+	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
+		*aperture_base = adev->doorbell.base;
+		*aperture_size = adev->doorbell.size;
+		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
+	} else {
+		*aperture_base = 0;
+		*aperture_size = 0;
+		*start_offset = 0;
+	}
+}
+
+/*
+ * amdgpu_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
+
+/**
+ * amdgpu_wb_fini - Disable Writeback and free memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
+static void amdgpu_wb_fini(struct amdgpu_device *adev)
+{
+	if (adev->wb.wb_obj) {
+		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
+			amdgpu_bo_kunmap(adev->wb.wb_obj);
+			amdgpu_bo_unpin(adev->wb.wb_obj);
+			amdgpu_bo_unreserve(adev->wb.wb_obj);
+		}
+		amdgpu_bo_unref(&adev->wb.wb_obj);
+		adev->wb.wb = NULL;
+		adev->wb.wb_obj = NULL;
+	}
+}
+
+/**
+ * amdgpu_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes writeback and allocates the writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int amdgpu_wb_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->wb.wb_obj == NULL) {
+		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_GTT, 0,  NULL, &adev->wb.wb_obj);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
+			return r;
+		}
+		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			amdgpu_wb_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
+				&adev->wb.gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->wb.wb_obj);
+			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
+			amdgpu_wb_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
+		amdgpu_bo_unreserve(adev->wb.wb_obj);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
+			amdgpu_wb_fini(adev);
+			return r;
+		}
+
+		adev->wb.num_wb = AMDGPU_MAX_WB;
+		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
+
+		/* clear wb memory */
+		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_wb_get - Allocate a wb entry
+ *
+ * @adev: amdgpu_device pointer
+ * @wb: wb index
+ *
+ * Allocate a wb slot for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
+{
+	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
+	if (offset < adev->wb.num_wb) {
+		__set_bit(offset, adev->wb.used);
+		*wb = offset;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+/**
+ * amdgpu_wb_free - Free a wb entry
+ *
+ * @adev: amdgpu_device pointer
+ * @wb: wb index
+ *
+ * Free a wb slot allocated for use by the driver (all asics)
+ */
+void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
+{
+	if (wb < adev->wb.num_wb)
+		__clear_bit(wb, adev->wb.used);
+}
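/*
 * Not part of the patch -- a sketch of how a ring or fence would typically
 * use a writeback slot.  It assumes adev->wb.wb is the CPU-visible u32 view
 * set up by the kmap in amdgpu_wb_init() above and that each slot is 4
 * bytes, matching the AMDGPU_MAX_WB * 4 allocation there.
 */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
	u64 gpu_addr;
	u32 index;
	int r;

	r = amdgpu_wb_get(adev, &index);
	if (r)
		return r;

	adev->wb.wb[index] = 0;				/* CPU side of the slot */
	gpu_addr = adev->wb.gpu_addr + (index * 4);	/* GPU writes land here */

	/* ... point a fence or rptr writeback at gpu_addr, poll wb[index] ... */

	amdgpu_wb_free(adev, index);
	return 0;
}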
+
+/**
+ * amdgpu_vram_location - try to find VRAM location
+ * @adev: amdgpu device structure holding all necessary informations
+ * @mc: memory controller structure holding memory informations
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as parameter (which is so far either the PCI aperture address or,
+ * for IGPs, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space then we limit the VRAM size to the aperture.
+ *
+ * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
+ * this shouldn't be a problem as we are using the PCI aperture as a reference.
+ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
+ * not IGP.
+ *
+ * Note: we use mc_vram_size as on some boards we need to program the mc to
+ * cover the whole aperture even if VRAM size is smaller than the aperture
+ * size (Novell bug 204882 along with lots of ubuntu ones).
+ *
+ * Note: when limiting vram it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
+ * of ubuntu ones).
+ *
+ * Note: IGP TOM addr should be the same as the aperture addr, we don't
+ * explicitly check for that though.
+ *
+ * FIXME: when reducing VRAM size align new size on power of 2.
+ */
+void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
+{
+	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
+
+	mc->vram_start = base;
+	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
+		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	if (limit && limit < mc->real_vram_size)
+		mc->real_vram_size = limit;
+	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+			mc->mc_vram_size >> 20, mc->vram_start,
+			mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * amdgpu_gtt_location - try to find GTT location
+ * @adev: amdgpu device structure holding all necessary informations
+ * @mc: memory controller structure holding memory informations
+ *
+ * Function will try to place GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left then we adjust the GTT size.
+ * Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size align new size on power of 2.
+ */
+void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+{
+	u64 size_af, size_bf;
+
+	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	if (size_bf > size_af) {
+		if (mc->gtt_size > size_bf) {
+			dev_warn(adev->dev, "limiting GTT\n");
+			mc->gtt_size = size_bf;
+		}
+		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+	} else {
+		if (mc->gtt_size > size_af) {
+			dev_warn(adev->dev, "limiting GTT\n");
+			mc->gtt_size = size_af;
+		}
+		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+	}
+	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+}
+
+/*
+ * GPU helpers function.
+ */
+/**
+ * amdgpu_card_posted - check if the hw has already been initialized
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
+bool amdgpu_card_posted(struct amdgpu_device *adev)
+{
+	uint32_t reg;
+
+	/* then check MEM_SIZE, in case the crtcs are off */
+	reg = RREG32(mmCONFIG_MEMSIZE);
+
+	if (reg)
+		return true;
+
+	return false;
+
+}
+
+/**
+ * amdgpu_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
+bool amdgpu_boot_test_post_card(struct amdgpu_device *adev)
+{
+	if (amdgpu_card_posted(adev))
+		return true;
+
+	if (adev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (adev->is_atom_bios)
+			amdgpu_atom_asic_init(adev->mode_info.atom_context);
+		return true;
+	} else {
+		dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
+/**
+ * amdgpu_dummy_page_init - init dummy page used by the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_dummy_page_init(struct amdgpu_device *adev)
+{
+	if (adev->dummy_page.page)
+		return 0;
+	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+	if (adev->dummy_page.page == NULL)
+		return -ENOMEM;
+	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
+					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
+		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+		__free_page(adev->dummy_page.page);
+		adev->dummy_page.page = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_dummy_page_fini - free dummy page used by the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
+{
+	if (adev->dummy_page.page == NULL)
+		return;
+	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
+			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(adev->dummy_page.page);
+	adev->dummy_page.page = NULL;
+}
+
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios.  The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+	return 0;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+	return 0;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides a MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides a MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct amdgpu_device *adev = info->dev->dev_private;
+
+	WREG32(reg, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+	struct amdgpu_device *adev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32(reg);
+	return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides a IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct amdgpu_device *adev = info->dev->dev_private;
+
+	WREG32_IO(reg, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+	struct amdgpu_device *adev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32_IO(reg);
+	return r;
+}
+
+/**
+ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+static void amdgpu_atombios_fini(struct amdgpu_device *adev)
+{
+	if (adev->mode_info.atom_context)
+		kfree(adev->mode_info.atom_context->scratch);
+	kfree(adev->mode_info.atom_context);
+	adev->mode_info.atom_context = NULL;
+	kfree(adev->mode_info.atom_card_info);
+	adev->mode_info.atom_card_info = NULL;
+}
+
+/**
+ * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+static int amdgpu_atombios_init(struct amdgpu_device *adev)
+{
+	struct card_info *atom_card_info =
+	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+	if (!atom_card_info)
+		return -ENOMEM;
+
+	adev->mode_info.atom_card_info = atom_card_info;
+	atom_card_info->dev = adev->ddev;
+	atom_card_info->reg_read = cail_reg_read;
+	atom_card_info->reg_write = cail_reg_write;
+	/* needed for iio ops */
+	if (adev->rio_mem) {
+		atom_card_info->ioreg_read = cail_ioreg_read;
+		atom_card_info->ioreg_write = cail_ioreg_write;
+	} else {
+		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+		atom_card_info->ioreg_read = cail_reg_read;
+		atom_card_info->ioreg_write = cail_reg_write;
+	}
+	atom_card_info->mc_read = cail_mc_read;
+	atom_card_info->mc_write = cail_mc_write;
+	atom_card_info->pll_read = cail_pll_read;
+	atom_card_info->pll_write = cail_pll_write;
+
+	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
+	if (!adev->mode_info.atom_context) {
+		amdgpu_atombios_fini(adev);
+		return -ENOMEM;
+	}
+
+	mutex_init(&adev->mode_info.atom_context->mutex);
+	amdgpu_atombios_scratch_regs_init(adev);
+	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
+	return 0;
+}
+
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * amdgpu_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: amdgpu_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
+static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+{
+	struct amdgpu_device *adev = cookie;
+	amdgpu_asic_set_vga_state(adev, state);
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+/**
+ * amdgpu_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool amdgpu_check_pot_argument(int arg)
+{
+	return (arg & (arg - 1)) == 0;
+}
+
+/**
+ * amdgpu_check_arguments - validate module params
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
+static void amdgpu_check_arguments(struct amdgpu_device *adev)
+{
+	/* vramlimit must be a power of two */
+	if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
+		dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
+				amdgpu_vram_limit);
+		amdgpu_vram_limit = 0;
+	}
+
+	if (amdgpu_gart_size != -1) {
+		/* gtt size must be power of two and greater or equal to 32M */
+		if (amdgpu_gart_size < 32) {
+			dev_warn(adev->dev, "gart size (%d) too small\n",
+				 amdgpu_gart_size);
+			amdgpu_gart_size = -1;
+		} else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
+			dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
+				 amdgpu_gart_size);
+			amdgpu_gart_size = -1;
+		}
+	}
+
+	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
+			 amdgpu_vm_size);
+		amdgpu_vm_size = 8;
+	}
+
+	if (amdgpu_vm_size < 1) {
+		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
+			 amdgpu_vm_size);
+		amdgpu_vm_size = 8;
+	}
+
+	/*
+	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
+	 */
+	if (amdgpu_vm_size > 1024) {
+		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
+			 amdgpu_vm_size);
+		amdgpu_vm_size = 8;
+	}
+
+	/* defines number of bits in page table versus page directory,
+	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
+	 * page table and the remaining bits are in the page directory */
+	if (amdgpu_vm_block_size == -1) {
+
+		/* Total bits covered by PD + PTs */
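+		/* amdgpu_vm_size is in GB; with 4KB pages that is 2^18 pages
+		 * per GB, hence ilog2(vm_size) + 18 bits of page index */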
+		unsigned bits = ilog2(amdgpu_vm_size) + 18;
+
+		/* Make sure the PD is 4K in size up to 8GB address space.
+		 * Above that split equally between PD and PTs */
+		if (amdgpu_vm_size <= 8)
+			amdgpu_vm_block_size = bits - 9;
+		else
+			amdgpu_vm_block_size = (bits + 3) / 2;
+
+	} else if (amdgpu_vm_block_size < 9) {
+		dev_warn(adev->dev, "VM page table size (%d) too small\n",
+			 amdgpu_vm_block_size);
+		amdgpu_vm_block_size = 9;
+	}
+
+	if (amdgpu_vm_block_size > 24 ||
+	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
+		dev_warn(adev->dev, "VM page table size (%d) too large\n",
+			 amdgpu_vm_block_size);
+		amdgpu_vm_block_size = 9;
+	}
+}
+
+/**
+ * amdgpu_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga switcheroo state
+ *
+ * Callback for the switcheroo driver.  Suspends or resumes
+ * the asic before or after it is powered up using ACPI methods.
+ */
+static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+		return;
+
+	if (state == VGA_SWITCHEROO_ON) {
+		unsigned d3_delay = dev->pdev->d3_delay;
+
+		printk(KERN_INFO "amdgpu: switched on\n");
+		/* don't suspend or resume card normally */
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+		amdgpu_resume_kms(dev, true, true);
+
+		dev->pdev->d3_delay = d3_delay;
+
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+		drm_kms_helper_poll_enable(dev);
+	} else {
+		printk(KERN_INFO "amdgpu: switched off\n");
+		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		amdgpu_suspend_kms(dev, true, true);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+/**
+ * amdgpu_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver.  Checks if the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
+static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	/*
+	* FIXME: open_count is protected by drm_global_mutex but that would lead to
+	* locking inversion with the driver load path. And the access here is
+	* completely racy anyway. So don't bother with locking for now.
+	*/
+	return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
+	.set_gpu_state = amdgpu_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = amdgpu_switcheroo_can_switch,
+};
+
+int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
+				  enum amd_ip_block_type block_type,
+				  enum amd_clockgating_state state)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+									    state);
+			if (r)
+				return r;
+		}
+	}
+	return r;
+}
+
+int amdgpu_set_powergating_state(struct amdgpu_device *adev,
+				  enum amd_ip_block_type block_type,
+				  enum amd_powergating_state state)
+{
+	int i, r = 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
+									    state);
+			if (r)
+				return r;
+		}
+	}
+	return r;
+}
+
+const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
+					struct amdgpu_device *adev,
+					enum amd_ip_block_type type)
+{
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++)
+		if (adev->ip_blocks[i].type == type)
+			return &adev->ip_blocks[i];
+
+	return NULL;
+}
+
+/**
+ * amdgpu_ip_block_version_cmp
+ *
+ * @adev: amdgpu_device pointer
+ * @type: enum amd_ip_block_type
+ * @major: major version
+ * @minor: minor version
+ *
+ * Returns 0 if the registered IP block version is equal to or greater than
+ * the requested version, 1 if it is smaller or the IP block doesn't exist.
+ */
+int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
+				enum amd_ip_block_type type,
+				u32 major, u32 minor)
+{
+	const struct amdgpu_ip_block_version *ip_block;
+	ip_block = amdgpu_get_ip_block(adev, type);
+
+	if (ip_block && ((ip_block->major > major) ||
+			((ip_block->major == major) &&
+			(ip_block->minor >= minor))))
+		return 0;
+
+	return 1;
+}
+
+static int amdgpu_early_init(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+	case CHIP_TONGA:
+	case CHIP_CARRIZO:
+		if (adev->asic_type == CHIP_CARRIZO)
+			adev->family = AMDGPU_FAMILY_CZ;
+		else
+			adev->family = AMDGPU_FAMILY_VI;
+
+		r = vi_set_ip_blocks(adev);
+		if (r)
+			return r;
+		break;
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
+			adev->family = AMDGPU_FAMILY_CI;
+		else
+			adev->family = AMDGPU_FAMILY_KV;
+
+		r = cik_set_ip_blocks(adev);
+		if (r)
+			return r;
+		break;
+#endif
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+
+	if (adev->ip_blocks == NULL) {
+		DRM_ERROR("No IP blocks found!\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
+			DRM_ERROR("disabled ip block: %d\n", i);
+			adev->ip_block_enabled[i] = false;
+		} else {
+			if (adev->ip_blocks[i].funcs->early_init) {
+				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+				if (r)
+					return r;
+			}
+			adev->ip_block_enabled[i] = true;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_init(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_enabled[i])
+			continue;
+		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+		if (r)
+			return r;
+		/* need to do gmc hw init early so we can allocate gpu mem */
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+			r = amdgpu_vram_scratch_init(adev);
+			if (r)
+				return r;
+			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+			if (r)
+				return r;
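+			/* the writeback buffers live in GPU memory, so they
+			 * can only be set up once the GMC hw is up */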
+			r = amdgpu_wb_init(adev);
+			if (r)
+				return r;
+		}
+	}
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_enabled[i])
+			continue;
+		/* gmc hw init is done early */
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+			continue;
+		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int amdgpu_late_init(struct amdgpu_device *adev)
+{
+	int i = 0, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_enabled[i])
+			continue;
+		/* enable clockgating to save power */
+		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+								    AMD_CG_STATE_GATE);
+		if (r)
+			return r;
+		if (adev->ip_blocks[i].funcs->late_init) {
+			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_fini(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_block_enabled[i])
+			continue;
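+		/* the writeback and VRAM scratch buffers depend on the GMC,
+		 * so free them before that block is shut down */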
+		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+			amdgpu_wb_fini(adev);
+			amdgpu_vram_scratch_fini(adev);
+		}
+		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
+		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+								    AMD_CG_STATE_UNGATE);
+		if (r)
+			return r;
+		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+		/* XXX handle errors */
+	}
+
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_block_enabled[i])
+			continue;
+		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+		/* XXX handle errors */
+		adev->ip_block_enabled[i] = false;
+	}
+
+	return 0;
+}
+
+static int amdgpu_suspend(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (!adev->ip_block_enabled[i])
+			continue;
+		/* ungate blocks so that suspend can properly shut them down */
+		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+								    AMD_CG_STATE_UNGATE);
+		/* XXX handle errors */
+		r = adev->ip_blocks[i].funcs->suspend(adev);
+		/* XXX handle errors */
+	}
+
+	return 0;
+}
+
+static int amdgpu_resume(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_enabled[i])
+			continue;
+		r = adev->ip_blocks[i].funcs->resume(adev);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_device_init - initialize the driver
+ *
+ * @adev: amdgpu_device pointer
+ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
+int amdgpu_device_init(struct amdgpu_device *adev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev,
+		       uint32_t flags)
+{
+	int r, i;
+	bool runtime = false;
+
+	adev->shutdown = false;
+	adev->dev = &pdev->dev;
+	adev->ddev = ddev;
+	adev->pdev = pdev;
+	adev->flags = flags;
+	adev->asic_type = flags & AMDGPU_ASIC_MASK;
+	adev->is_atom_bios = false;
+	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
+	adev->mc.gtt_size = 512 * 1024 * 1024;
+	adev->accel_working = false;
+	adev->num_rings = 0;
+	adev->mman.buffer_funcs = NULL;
+	adev->mman.buffer_funcs_ring = NULL;
+	adev->vm_manager.vm_pte_funcs = NULL;
+	adev->vm_manager.vm_pte_funcs_ring = NULL;
+	adev->gart.gart_funcs = NULL;
+	adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+
+	adev->smc_rreg = &amdgpu_invalid_rreg;
+	adev->smc_wreg = &amdgpu_invalid_wreg;
+	adev->pcie_rreg = &amdgpu_invalid_rreg;
+	adev->pcie_wreg = &amdgpu_invalid_wreg;
+	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
+	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
+	adev->didt_rreg = &amdgpu_invalid_rreg;
+	adev->didt_wreg = &amdgpu_invalid_wreg;
+	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
+	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
+
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+		amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+		pdev->subsystem_vendor, pdev->subsystem_device);
+
+	/* mutex initialization are all done here so we
+	 * can recall function without having locking issues */
+	mutex_init(&adev->ring_lock);
+	atomic_set(&adev->irq.ih.lock, 0);
+	mutex_init(&adev->gem.mutex);
+	mutex_init(&adev->pm.mutex);
+	mutex_init(&adev->gfx.gpu_clock_mutex);
+	mutex_init(&adev->srbm_mutex);
+	mutex_init(&adev->grbm_idx_mutex);
+	init_rwsem(&adev->exclusive_lock);
+	mutex_init(&adev->mn_lock);
+	hash_init(adev->mn_hash);
+
+	amdgpu_check_arguments(adev);
+
+	/* Registers mapping */
+	/* TODO: block userspace mapping of io register */
+	spin_lock_init(&adev->mmio_idx_lock);
+	spin_lock_init(&adev->smc_idx_lock);
+	spin_lock_init(&adev->pcie_idx_lock);
+	spin_lock_init(&adev->uvd_ctx_idx_lock);
+	spin_lock_init(&adev->didt_idx_lock);
+	spin_lock_init(&adev->audio_endpt_idx_lock);
+
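+	/* map the register aperture; the driver expects it in PCI BAR 5 */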
+	adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+	adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
+	if (adev->rmmio == NULL) {
+		return -ENOMEM;
+	}
+	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
+	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
+
+	/* doorbell bar mapping */
+	amdgpu_doorbell_init(adev);
+
+	/* io port mapping */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
+			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
+			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
+			break;
+		}
+	}
+	if (adev->rio_mem == NULL)
+		DRM_ERROR("Unable to find PCI I/O BAR\n");
+
+	/* early init functions */
+	r = amdgpu_early_init(adev);
+	if (r)
+		return r;
+
+	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
+	/* this will fail for cards that aren't VGA class devices, just
+	 * ignore it */
+	vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
+
+	if (amdgpu_runtime_pm == 1)
+		runtime = true;
+	if (amdgpu_device_is_px(ddev))
+		runtime = true;
+	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
+	if (runtime)
+		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+
+	/* Read BIOS */
+	if (!amdgpu_get_bios(adev))
+		return -EINVAL;
+	/* Must be an ATOMBIOS */
+	if (!adev->is_atom_bios) {
+		dev_err(adev->dev, "Expecting atombios for GPU\n");
+		return -EINVAL;
+	}
+	r = amdgpu_atombios_init(adev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!amdgpu_card_posted(adev)) {
+		if (!adev->bios) {
+			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	}
+
+	/* Initialize clocks */
+	r = amdgpu_atombios_get_clock_info(adev);
+	if (r)
+		return r;
+	/* init i2c buses */
+	amdgpu_atombios_i2c_init(adev);
+
+	/* Fence driver */
+	r = amdgpu_fence_driver_init(adev);
+	if (r)
+		return r;
+
+	/* init the mode config */
+	drm_mode_config_init(adev->ddev);
+
+	r = amdgpu_init(adev);
+	if (r) {
+		amdgpu_fini(adev);
+		return r;
+	}
+
+	adev->accel_working = true;
+
+	amdgpu_fbdev_init(adev);
+
+	r = amdgpu_ib_pool_init(adev);
+	if (r) {
+		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	r = amdgpu_gem_debugfs_init(adev);
+	if (r) {
+		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+	}
+
+	r = amdgpu_debugfs_regs_init(adev);
+	if (r) {
+		DRM_ERROR("registering register debugfs failed (%d).\n", r);
+	}
+
+	if ((amdgpu_testing & 1)) {
+		if (adev->accel_working)
+			amdgpu_test_moves(adev);
+		else
+			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
+	}
+	if ((amdgpu_testing & 2)) {
+		if (adev->accel_working)
+			amdgpu_test_syncing(adev);
+		else
+			DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
+	}
+	if (amdgpu_benchmarking) {
+		if (adev->accel_working)
+			amdgpu_benchmark(adev, amdgpu_benchmarking);
+		else
+			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
+	}
+
+	/* enable clockgating, etc. after ib tests, etc. since some blocks require
+	 * explicit gating rather than handling it automatically.
+	 */
+	r = amdgpu_late_init(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_device_fini - tear down the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
+void amdgpu_device_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	DRM_INFO("amdgpu: finishing device.\n");
+	adev->shutdown = true;
+	/* evict vram memory */
+	amdgpu_bo_evict_vram(adev);
+	amdgpu_ib_pool_fini(adev);
+	amdgpu_fence_driver_fini(adev);
+	amdgpu_fbdev_fini(adev);
+	r = amdgpu_fini(adev);
+	if (adev->ip_block_enabled)
+		kfree(adev->ip_block_enabled);
+	adev->ip_block_enabled = NULL;
+	adev->accel_working = false;
+	/* free i2c buses */
+	amdgpu_i2c_fini(adev);
+	amdgpu_atombios_fini(adev);
+	kfree(adev->bios);
+	adev->bios = NULL;
+	vga_switcheroo_unregister_client(adev->pdev);
+	vga_client_register(adev->pdev, NULL, NULL, NULL);
+	if (adev->rio_mem)
+		pci_iounmap(adev->pdev, adev->rio_mem);
+	adev->rio_mem = NULL;
+	iounmap(adev->rmmio);
+	adev->rmmio = NULL;
+	amdgpu_doorbell_fini(adev);
+	amdgpu_debugfs_regs_cleanup(adev);
+	amdgpu_debugfs_remove_files(adev);
+}
+
+
+/*
+ * Suspend & resume.
+ */
+/**
+ * amdgpu_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @suspend: true to put the device into a low power (D3hot) PCI state
+ * @fbcon: whether to suspend the fbdev console as well
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+{
+	struct amdgpu_device *adev;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	int i, r;
+	bool force_completion = false;
+
+	if (dev == NULL || dev->dev_private == NULL) {
+		return -ENODEV;
+	}
+
+	adev = dev->dev_private;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	drm_kms_helper_poll_disable(dev);
+
+	/* turn off display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	}
+
+	/* unpin the front buffers */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+		struct amdgpu_bo *robj;
+
+		if (rfb == NULL || rfb->obj == NULL) {
+			continue;
+		}
+		robj = gem_to_amdgpu_bo(rfb->obj);
+		/* don't unpin kernel fb objects */
+		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+			r = amdgpu_bo_reserve(robj, false);
+			if (r == 0) {
+				amdgpu_bo_unpin(robj);
+				amdgpu_bo_unreserve(robj);
+			}
+		}
+	}
+	/* evict vram memory */
+	amdgpu_bo_evict_vram(adev);
+
+	/* wait for gpu to finish processing current batch */
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring)
+			continue;
+
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* delay GPU reset to resume */
+			force_completion = true;
+		}
+	}
+	if (force_completion) {
+		amdgpu_fence_driver_force_completion(adev);
+	}
+
+	r = amdgpu_suspend(adev);
+
+	/* evict remaining vram memory */
+	amdgpu_bo_evict_vram(adev);
+
+	pci_save_state(dev->pdev);
+	if (suspend) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
+
+	if (fbcon) {
+		console_lock();
+		amdgpu_fbdev_set_suspend(adev, 1);
+		console_unlock();
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ * @resume: true to re-enable the device and restore its PCI power state
+ * @fbcon: whether to resume the fbdev console as well
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+{
+	struct drm_connector *connector;
+	struct amdgpu_device *adev = dev->dev_private;
+	int r;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	if (fbcon) {
+		console_lock();
+	}
+	if (resume) {
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		if (pci_enable_device(dev->pdev)) {
+			if (fbcon)
+				console_unlock();
+			return -1;
+		}
+	}
+
+	/* post card */
+	amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+	r = amdgpu_resume(adev);
+
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	r = amdgpu_late_init(adev);
+	if (r)
+		return r;
+
+	/* blat the mode back in */
+	if (fbcon) {
+		drm_helper_resume_force_mode(dev);
+		/* turn on display hw */
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+		}
+	}
+
+	drm_kms_helper_poll_enable(dev);
+
+	if (fbcon) {
+		amdgpu_fbdev_set_suspend(adev, 0);
+		console_unlock();
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_gpu_reset - reset the asic
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Attempts to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int amdgpu_gpu_reset(struct amdgpu_device *adev)
+{
+	unsigned ring_sizes[AMDGPU_MAX_RINGS];
+	uint32_t *ring_data[AMDGPU_MAX_RINGS];
+
+	bool saved = false;
+
+	int i, r;
+	int resched;
+
+	down_write(&adev->exclusive_lock);
+
+	if (!adev->needs_reset) {
+		up_write(&adev->exclusive_lock);
+		return 0;
+	}
+
+	adev->needs_reset = false;
+	atomic_inc(&adev->gpu_reset_counter);
+
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+
+	r = amdgpu_suspend(adev);
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring)
+			continue;
+
+		ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
+		if (ring_sizes[i]) {
+			saved = true;
+			dev_info(adev->dev, "Saved %d dwords of commands "
+				 "on ring %d.\n", ring_sizes[i], i);
+		}
+	}
+
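+	/* if restoring the saved commands makes the IB tests fail, the saved
+	 * data is dropped and the reset is retried with empty rings */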
+retry:
+	r = amdgpu_asic_reset(adev);
+	if (!r) {
+		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+		r = amdgpu_resume(adev);
+	}
+
+	if (!r) {
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_ring *ring = adev->rings[i];
+			if (!ring)
+				continue;
+
+			amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
+			ring_sizes[i] = 0;
+			ring_data[i] = NULL;
+		}
+
+		r = amdgpu_ib_ring_tests(adev);
+		if (r) {
+			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+			if (saved) {
+				saved = false;
+				r = amdgpu_suspend(adev);
+				goto retry;
+			}
+		}
+	} else {
+		amdgpu_fence_driver_force_completion(adev);
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			if (adev->rings[i])
+				kfree(ring_data[i]);
+		}
+	}
+
+	drm_helper_resume_force_mode(adev->ddev);
+
+	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+	if (r) {
+		/* bad news, how to tell it to userspace ? */
+		dev_info(adev->dev, "GPU reset failed\n");
+	}
+
+	up_write(&adev->exclusive_lock);
+	return r;
+}
+
+
+/*
+ * Debugfs
+ */
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+			     struct drm_info_list *files,
+			     unsigned nfiles)
+{
+	unsigned i;
+
+	for (i = 0; i < adev->debugfs_count; i++) {
+		if (adev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = adev->debugfs_count + 1;
+	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Report so we can increase "
+			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	adev->debugfs[adev->debugfs_count].files = files;
+	adev->debugfs[adev->debugfs_count].num_files = nfiles;
+	adev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 adev->ddev->control->debugfs_root,
+				 adev->ddev->control);
+	drm_debugfs_create_files(files, nfiles,
+				 adev->ddev->primary->debugfs_root,
+				 adev->ddev->primary);
+#endif
+	return 0;
+}
+
+static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+
+	for (i = 0; i < adev->debugfs_count; i++) {
+		drm_debugfs_remove_files(adev->debugfs[i].files,
+					 adev->debugfs[i].num_files,
+					 adev->ddev->control);
+		drm_debugfs_remove_files(adev->debugfs[i].files,
+					 adev->debugfs[i].num_files,
+					 adev->ddev->primary);
+	}
+#endif
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
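+	/* MMIO registers are 32 bits wide, so both the offset and the size
+	 * must be dword aligned */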
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		if (*pos > adev->rmmio_size)
+			return result;
+
+		value = RREG32(*pos >> 2);
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		uint32_t value;
+
+		if (*pos > adev->rmmio_size)
+			return result;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		WREG32(*pos >> 2, value);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static const struct file_operations amdgpu_debugfs_regs_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_regs_read,
+	.write = amdgpu_debugfs_regs_write,
+	.llseek = default_llseek
+};
+
+static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+	struct drm_minor *minor = adev->ddev->primary;
+	struct dentry *ent, *root = minor->debugfs_root;
+
+	ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
+				  adev, &amdgpu_debugfs_regs_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	i_size_write(ent->d_inode, adev->rmmio_size);
+	adev->debugfs_regs = ent;
+
+	return 0;
+}
+
+static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
+{
+	debugfs_remove(adev->debugfs_regs);
+	adev->debugfs_regs = NULL;
+}
+
+int amdgpu_debugfs_init(struct drm_minor *minor)
+{
+	return 0;
+}
+
+void amdgpu_debugfs_cleanup(struct drm_minor *minor)
+{
+}
+#endif

+ 832 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -0,0 +1,832 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_connectors.h"
+#include <asm/div64.h>
+
+#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+
+static void amdgpu_flip_work_func(struct work_struct *__work)
+{
+	struct amdgpu_flip_work *work =
+		container_of(__work, struct amdgpu_flip_work, flip_work);
+	struct amdgpu_device *adev = work->adev;
+	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
+
+	struct drm_crtc *crtc = &amdgpuCrtc->base;
+	struct amdgpu_fence *fence;
+	unsigned long flags;
+	int r;
+
+	down_read(&adev->exclusive_lock);
+	if (work->fence) {
+		fence = to_amdgpu_fence(work->fence);
+		if (fence) {
+			r = amdgpu_fence_wait(fence, false);
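+			/* -EDEADLK indicates the wait can only complete after
+			 * a GPU reset, so perform one here */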
+			if (r == -EDEADLK) {
+				up_read(&adev->exclusive_lock);
+				r = amdgpu_gpu_reset(adev);
+				down_read(&adev->exclusive_lock);
+			}
+		} else
+			r = fence_wait(work->fence, false);
+
+		if (r)
+			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
+
+		/* We continue with the page flip even if we failed to wait on
+		 * the fence, otherwise the DRM core and userspace will be
+		 * confused about which BO the CRTC is scanning out
+		 */
+
+		fence_put(work->fence);
+		work->fence = NULL;
+	}
+
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	/* set the proper interrupt */
+	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
+	/* do the flip (mmio) */
+	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
+	/* set the flip status */
+	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	up_read(&adev->exclusive_lock);
+}
+
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void amdgpu_unpin_work_func(struct work_struct *__work)
+{
+	struct amdgpu_flip_work *work =
+		container_of(__work, struct amdgpu_flip_work, unpin_work);
+	int r;
+
+	/* unpin of the old buffer */
+	r = amdgpu_bo_reserve(work->old_rbo, false);
+	if (likely(r == 0)) {
+		r = amdgpu_bo_unpin(work->old_rbo);
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to unpin buffer after flip\n");
+		}
+		amdgpu_bo_unreserve(work->old_rbo);
+	} else
+		DRM_ERROR("failed to reserve buffer after flip\n");
+
+	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work);
+}
+
+int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_pending_vblank_event *event,
+			  uint32_t page_flip_flags)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_framebuffer *old_amdgpu_fb;
+	struct amdgpu_framebuffer *new_amdgpu_fb;
+	struct drm_gem_object *obj;
+	struct amdgpu_flip_work *work;
+	struct amdgpu_bo *new_rbo;
+	unsigned long flags;
+	u64 tiling_flags;
+	u64 base;
+	int r;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
+
+	work->event = event;
+	work->adev = adev;
+	work->crtc_id = amdgpu_crtc->crtc_id;
+
+	/* schedule unpin of the old buffer */
+	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+	obj = old_amdgpu_fb->obj;
+
+	/* take a reference to the old object */
+	drm_gem_object_reference(obj);
+	work->old_rbo = gem_to_amdgpu_bo(obj);
+
+	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
+	obj = new_amdgpu_fb->obj;
+	new_rbo = gem_to_amdgpu_bo(obj);
+
+	/* pin the new buffer */
+	r = amdgpu_bo_reserve(new_rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		goto cleanup;
+	}
+
+	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		goto cleanup;
+	}
+
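+	/* hold the new BO's exclusive fence so the flip worker can wait for
+	 * rendering to it to finish before programming the flip */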
+	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
+	amdgpu_bo_unreserve(new_rbo);
+
+	work->base = base;
+
+	r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+	if (r) {
+		DRM_ERROR("failed to get vblank before flip\n");
+		goto pflip_cleanup;
+	}
+
+	/* we borrow the event spin lock for protecting flip_work */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		r = -EBUSY;
+		goto vblank_cleanup;
+	}
+
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
+	amdgpu_crtc->pflip_works = work;
+
+	/* update crtc fb */
+	crtc->primary->fb = fb;
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+	queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
+	return 0;
+
+vblank_cleanup:
+	drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+
+pflip_cleanup:
+	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
+		DRM_ERROR("failed to reserve new rbo in error path\n");
+		goto cleanup;
+	}
+	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
+		DRM_ERROR("failed to unpin new rbo in error path\n");
+	}
+	amdgpu_bo_unreserve(new_rbo);
+
+cleanup:
+	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	fence_put(work->fence);
+	kfree(work);
+
+	return r;
+}
+
+int amdgpu_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct amdgpu_device *adev;
+	struct drm_crtc *crtc;
+	bool active = false;
+	int ret;
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	dev = set->crtc->dev;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_crtc_helper_set_config(set);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->enabled)
+			active = true;
+
+	pm_runtime_mark_last_busy(dev->dev);
+
+	adev = dev->dev_private;
+	/* if we have active crtcs and we don't have a power ref,
+	   take the current one */
+	if (active && !adev->have_disp_power_ref) {
+		adev->have_disp_power_ref = true;
+		return ret;
+	}
+	/* if we have no active crtcs, then drop the power ref
+	   we got before */
+	if (!active && adev->have_disp_power_ref) {
+		pm_runtime_put_autosuspend(dev->dev);
+		adev->have_disp_power_ref = false;
+	}
+
+	/* drop the power reference we got coming in here */
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+static const char *encoder_names[38] = {
+	"NONE",
+	"INTERNAL_LVDS",
+	"INTERNAL_TMDS1",
+	"INTERNAL_TMDS2",
+	"INTERNAL_DAC1",
+	"INTERNAL_DAC2",
+	"INTERNAL_SDVOA",
+	"INTERNAL_SDVOB",
+	"SI170B",
+	"CH7303",
+	"CH7301",
+	"INTERNAL_DVO1",
+	"EXTERNAL_SDVOA",
+	"EXTERNAL_SDVOB",
+	"TITFP513",
+	"INTERNAL_LVTM1",
+	"VT1623",
+	"HDMI_SI1930",
+	"HDMI_INTERNAL",
+	"INTERNAL_KLDSCP_TMDS1",
+	"INTERNAL_KLDSCP_DVO1",
+	"INTERNAL_KLDSCP_DAC1",
+	"INTERNAL_KLDSCP_DAC2",
+	"SI178",
+	"MVPU_FPGA",
+	"INTERNAL_DDI",
+	"VT1625",
+	"HDMI_SI1932",
+	"DP_AN9801",
+	"DP_DP501",
+	"INTERNAL_UNIPHY",
+	"INTERNAL_KLDSCP_LVTMA",
+	"INTERNAL_UNIPHY1",
+	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
+	"INTERNAL_VCE",
+	"INTERNAL_UNIPHY3",
+};
+
+static const char *hpd_names[6] = {
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
+void amdgpu_print_display_setup(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+	uint32_t devices;
+	int i = 0;
+
+	DRM_INFO("AMDGPU Display Connectors\n");
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		amdgpu_connector = to_amdgpu_connector(connector);
+		DRM_INFO("Connector %d:\n", i);
+		DRM_INFO("  %s\n", connector->name);
+		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
+		if (amdgpu_connector->ddc_bus) {
+			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
+				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
+				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
+				 amdgpu_connector->ddc_bus->rec.a_data_reg,
+				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
+				 amdgpu_connector->ddc_bus->rec.en_data_reg,
+				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
+				 amdgpu_connector->ddc_bus->rec.y_data_reg);
+			if (amdgpu_connector->router.ddc_valid)
+				DRM_INFO("  DDC Router 0x%x/0x%x\n",
+					 amdgpu_connector->router.ddc_mux_control_pin,
+					 amdgpu_connector->router.ddc_mux_state);
+			if (amdgpu_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 amdgpu_connector->router.cd_mux_control_pin,
+					 amdgpu_connector->router.cd_mux_state);
+		} else {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+		}
+		DRM_INFO("  Encoders:\n");
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			amdgpu_encoder = to_amdgpu_encoder(encoder);
+			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
+			if (devices) {
+				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_TV1_SUPPORT)
+					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CV_SUPPORT)
+					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
+			}
+		}
+		i++;
+	}
+}
+
+/**
+ * amdgpu_ddc_probe - sanity check the DDC bus on a connector
+ *
+ * @amdgpu_connector: connector to probe
+ * @use_aux: use the DP AUX i2c bus rather than the regular DDC bus
+ *
+ * Returns true if a display with a valid EDID header responds.
+ */
+bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+		       bool use_aux)
+{
+	u8 out = 0x0;
+	u8 buf[8];
+	int ret;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = DDC_ADDR,
+			.flags = 0,
+			.len = 1,
+			.buf = &out,
+		},
+		{
+			.addr = DDC_ADDR,
+			.flags = I2C_M_RD,
+			.len = 8,
+			.buf = buf,
+		}
+	};
+
+	/* on hw with routers, select right port */
+	if (amdgpu_connector->router.ddc_valid)
+		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
+
+	if (use_aux) {
+		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
+	} else {
+		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
+	}
+
+	if (ret != 2)
+		/* Couldn't find an accessible DDC on this connector */
+		return false;
+	/* Probe also for valid EDID header
+	 * EDID header starts with:
+	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+	 * Only the first 6 bytes must be valid as
+	 * drm_edid_block_valid() can fix the last 2 bytes */
+	if (drm_edid_header_is_valid(buf) < 6) {
+		/* Couldn't find an accessible EDID on this
+		 * connector */
+		return false;
+	}
+	return true;
+}
+
+static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
+
+	if (amdgpu_fb->obj) {
+		drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
+	}
+	drm_framebuffer_cleanup(fb);
+	kfree(amdgpu_fb);
+}
+
+static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						  struct drm_file *file_priv,
+						  unsigned int *handle)
+{
+	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
+
+	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+	.destroy = amdgpu_user_framebuffer_destroy,
+	.create_handle = amdgpu_user_framebuffer_create_handle,
+};
+
+int
+amdgpu_framebuffer_init(struct drm_device *dev,
+			struct amdgpu_framebuffer *rfb,
+			struct drm_mode_fb_cmd2 *mode_cmd,
+			struct drm_gem_object *obj)
+{
+	int ret;
+	rfb->obj = obj;
+	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	if (ret) {
+		rfb->obj = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+amdgpu_user_framebuffer_create(struct drm_device *dev,
+			       struct drm_file *file_priv,
+			       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct amdgpu_framebuffer *amdgpu_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (obj ==  NULL) {
+		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+			"can't create framebuffer\n", mode_cmd->handles[0]);
+		return ERR_PTR(-ENOENT);
+	}
+
+	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
+	if (amdgpu_fb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+	if (ret) {
+		kfree(amdgpu_fb);
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	return &amdgpu_fb->base;
+}
+
+static void amdgpu_output_poll_changed(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	amdgpu_fb_output_poll_changed(adev);
+}
+
+const struct drm_mode_config_funcs amdgpu_mode_funcs = {
+	.fb_create = amdgpu_user_framebuffer_create,
+	.output_poll_changed = amdgpu_output_poll_changed
+};
+
+static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
+{	{ UNDERSCAN_OFF, "off" },
+	{ UNDERSCAN_ON, "on" },
+	{ UNDERSCAN_AUTO, "auto" },
+};
+
+static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
+{	{ AMDGPU_AUDIO_DISABLE, "off" },
+	{ AMDGPU_AUDIO_ENABLE, "on" },
+	{ AMDGPU_AUDIO_AUTO, "auto" },
+};
+
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
+{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
+	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
+};
+
+int amdgpu_modeset_create_props(struct amdgpu_device *adev)
+{
+	int sz;
+
+	if (adev->is_atom_bios) {
+		adev->mode_info.coherent_mode_property =
+			drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
+		if (!adev->mode_info.coherent_mode_property)
+			return -ENOMEM;
+	}
+
+	adev->mode_info.load_detect_property =
+		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+	if (!adev->mode_info.load_detect_property)
+		return -ENOMEM;
+
+	drm_mode_create_scaling_mode_property(adev->ddev);
+
+	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
+	adev->mode_info.underscan_property =
+		drm_property_create_enum(adev->ddev, 0,
+				    "underscan",
+				    amdgpu_underscan_enum_list, sz);
+
+	adev->mode_info.underscan_hborder_property =
+		drm_property_create_range(adev->ddev, 0,
+					"underscan hborder", 0, 128);
+	if (!adev->mode_info.underscan_hborder_property)
+		return -ENOMEM;
+
+	adev->mode_info.underscan_vborder_property =
+		drm_property_create_range(adev->ddev, 0,
+					"underscan vborder", 0, 128);
+	if (!adev->mode_info.underscan_vborder_property)
+		return -ENOMEM;
+
+	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
+	adev->mode_info.audio_property =
+		drm_property_create_enum(adev->ddev, 0,
+					 "audio",
+					 amdgpu_audio_enum_list, sz);
+
+	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
+	adev->mode_info.dither_property =
+		drm_property_create_enum(adev->ddev, 0,
+					 "dither",
+					 amdgpu_dither_enum_list, sz);
+
+	return 0;
+}
+
+void amdgpu_update_display_priority(struct amdgpu_device *adev)
+{
+	/* adjustment options for the display watermarks */
+	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
+		adev->mode_info.disp_priority = 0;
+	else
+		adev->mode_info.disp_priority = amdgpu_disp_priority;
+
+}
+
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
+{
+	/* try and guess if this is a tv or a monitor */
+	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+	    (mode->vdisplay == 576) || /* 576p */
+	    (mode->vdisplay == 720) || /* 720p */
+	    (mode->vdisplay == 1080)) /* 1080p */
+		return true;
+	else
+		return false;
+}
+
+bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_encoder *amdgpu_encoder;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+	u32 src_v = 1, dst_v = 1;
+	u32 src_h = 1, dst_h = 1;
+
+	amdgpu_crtc->h_border = 0;
+	amdgpu_crtc->v_border = 0;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		amdgpu_encoder = to_amdgpu_encoder(encoder);
+		connector = amdgpu_get_connector_for_encoder(encoder);
+		amdgpu_connector = to_amdgpu_connector(connector);
+
+		/* set scaling */
+		if (amdgpu_encoder->rmx_type == RMX_OFF)
+			amdgpu_crtc->rmx_type = RMX_OFF;
+		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
+			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
+			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
+		else
+			amdgpu_crtc->rmx_type = RMX_OFF;
+		/* copy native mode */
+		memcpy(&amdgpu_crtc->native_mode,
+		       &amdgpu_encoder->native_mode,
+		       sizeof(struct drm_display_mode));
+		src_v = crtc->mode.vdisplay;
+		dst_v = amdgpu_crtc->native_mode.vdisplay;
+		src_h = crtc->mode.hdisplay;
+		dst_h = amdgpu_crtc->native_mode.hdisplay;
+
+		/* fix up for overscan on hdmi */
+		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
+		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
+		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
+		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+		      is_hdtv_mode(mode)))) {
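+			/* use the configured underscan borders, or default to
+			 * 1/32 of the mode size plus 16 pixels per axis */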
+			if (amdgpu_encoder->underscan_hborder != 0)
+				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
+			else
+				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
+			if (amdgpu_encoder->underscan_vborder != 0)
+				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
+			else
+				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
+			amdgpu_crtc->rmx_type = RMX_FULL;
+			src_v = crtc->mode.vdisplay;
+			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
+			src_h = crtc->mode.hdisplay;
+			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
+		}
+	}
+	if (amdgpu_crtc->rmx_type != RMX_OFF) {
+		fixed20_12 a, b;
+		a.full = dfixed_const(src_v);
+		b.full = dfixed_const(dst_v);
+		amdgpu_crtc->vsc.full = dfixed_div(a, b);
+		a.full = dfixed_const(src_h);
+		b.full = dfixed_const(dst_h);
+		amdgpu_crtc->hsc.full = dfixed_div(a, b);
+	} else {
+		amdgpu_crtc->vsc.full = dfixed_const(1);
+		amdgpu_crtc->hsc.full = dfixed_const(1);
+	}
+	return true;
+}
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when query happened.
+ *
+ * \param dev Device to query.
+ * \param crtc Crtc to query.
+ * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ *               scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ *               scanout position query. Can be NULL to skip timestamp.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant but
+ * unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
+			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, vtotal, ret = 0;
+	bool in_vbl = true;
+
+	struct amdgpu_device *adev = dev->dev_private;
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0)
+		ret |= DRM_SCANOUTPOS_VALID;
+
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Decode into vertical and horizontal scanout position. */
+	*vpos = position & 0x1fff;
+	*hpos = (position >> 16) & 0x1fff;
+
+	/* Valid vblank area boundaries from gpu retrieved? */
+	if (vbl > 0) {
+		/* Yes: Decode. */
+		ret |= DRM_SCANOUTPOS_ACCURATE;
+		vbl_start = vbl & 0x1fff;
+		vbl_end = (vbl >> 16) & 0x1fff;
+	} else {
+		/* No: Fake something reasonable which gives at least ok results. */
+		vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+		vbl_end = 0;
+	}
+
+	/* Test scanout position against vblank region. */
+	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+		in_vbl = false;
+
+	/* Check if inside vblank area and apply corrective offsets:
+	 * vpos will then be >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+
+	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
+	if (in_vbl && (*vpos >= vbl_start)) {
+		vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+		*vpos = *vpos - vtotal;
+	}
+
+	/* Correct for shifted end of vbl at vbl_end. */
+	*vpos = *vpos - vbl_end;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
+		vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+		vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+
+		if (vbl_start - *vpos < vtotal / 100) {
+			*vpos -= vtotal;
+
+			/* Signal this correction as "applied". */
+			ret |= 0x8;
+		}
+	}
+
+	return ret;
+}
+
+int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
+{
+	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
+		return AMDGPU_CRTC_IRQ_NONE;
+
+	switch (crtc) {
+	case 0:
+		return AMDGPU_CRTC_IRQ_VBLANK1;
+	case 1:
+		return AMDGPU_CRTC_IRQ_VBLANK2;
+	case 2:
+		return AMDGPU_CRTC_IRQ_VBLANK3;
+	case 3:
+		return AMDGPU_CRTC_IRQ_VBLANK4;
+	case 4:
+		return AMDGPU_CRTC_IRQ_VBLANK5;
+	case 5:
+		return AMDGPU_CRTC_IRQ_VBLANK6;
+	default:
+		return AMDGPU_CRTC_IRQ_NONE;
+	}
+}

+ 955 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c

@@ -0,0 +1,955 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_i2c.h"
+#include "amdgpu_dpm.h"
+#include "atom.h"
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+{
+	printk("\tui class: ");
+	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+	default:
+		printk("none\n");
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		printk("battery\n");
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		printk("balanced\n");
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		printk("performance\n");
+		break;
+	}
+	printk("\tinternal class: ");
+	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
+	    (class2 == 0))
+		printk("none");
+	else {
+		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+			printk("boot ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+			printk("thermal ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+			printk("limited_pwr ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
+			printk("rest ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
+			printk("forced ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			printk("3d_perf ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
+			printk("ovrdrv ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+			printk("uvd ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
+			printk("3d_low ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+			printk("acpi ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+			printk("uvd_hd2 ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+			printk("uvd_hd ");
+		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+			printk("uvd_sd ");
+		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+			printk("limited_pwr2 ");
+		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+			printk("ulv ");
+		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+			printk("uvd_mvc ");
+	}
+	printk("\n");
+}
+
+void amdgpu_dpm_print_cap_info(u32 caps)
+{
+	printk("\tcaps: ");
+	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		printk("single_disp ");
+	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
+		printk("video ");
+	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
+		printk("no_dc ");
+	printk("\n");
+}
+
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+				struct amdgpu_ps *rps)
+{
+	printk("\tstatus: ");
+	if (rps == adev->pm.dpm.current_ps)
+		printk("c ");
+	if (rps == adev->pm.dpm.requested_ps)
+		printk("r ");
+	if (rps == adev->pm.dpm.boot_ps)
+		printk("b ");
+	printk("\n");
+}
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+	u32 line_time_us, vblank_lines;
+	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
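+				/* the mode clock is in kHz, so htotal * 1000 /
+				 * clock gives the line time in microseconds */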
+				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+					amdgpu_crtc->hw_mode.clock;
+				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+					amdgpu_crtc->hw_mode.crtc_vdisplay +
+					(amdgpu_crtc->v_border * 2);
+				vblank_time_us = vblank_lines * line_time_us;
+				break;
+			}
+		}
+	}
+
+	return vblank_time_us;
+}
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+	u32 vrefresh = 0;
+
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+				vrefresh = amdgpu_crtc->hw_mode.vrefresh;
+				break;
+			}
+		}
+	}
+
+	return vrefresh;
+}
+
+void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+			      u32 *p, u32 *u)
+{
+	u32 b_c = 0;
+	u32 i_c;
+	u32 tmp;
+
+	i_c = (i * r_c) / 100;
+	tmp = i_c >> p_b;
+
+	while (tmp) {
+		b_c++;
+		tmp >>= 1;
+	}
+
+	*u = (b_c + 1) / 2;
+	*p = i_c / (1 << (2 * (*u)));
+}
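
The helper above reduces a percentage of a reference count to a (p, u) pair; the exact meaning of the inputs depends on the caller, but numerically, with assumed inputs i = 30, r_c = 1000 and p_b = 2 (illustrative only):

	/*   i_c = 30 * 1000 / 100 = 300
	 *   tmp = 300 >> 2        = 75 -> shifted to zero in 7 steps, so b_c = 7
	 *   u   = (7 + 1) / 2     = 4
	 *   p   = 300 / (1 << (2 * 4)) = 300 / 256 = 1
	 */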
+
+int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+	u32 k, a, ah, al;
+	u32 t1;
+
+	if ((fl == 0) || (fh == 0) || (fl > fh))
+		return -EINVAL;
+
+	k = (100 * fh) / fl;
+	t1 = (t * (k - 100));
+	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+	a = (a + 5) / 10;
+	ah = ((a * t) + 5000) / 10000;
+	al = a - ah;
+
+	*th = t - ah;
+	*tl = t + al;
+
+	return 0;
+}
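
amdgpu_calculate_at() spreads a hysteresis window h around the target t, scaled by the ratio of the high and low frequencies, and rejects fl == 0, fh == 0 or fl > fh. A purely numeric walk-through with assumed inputs t = 1000, h = 30, fh = 800, fl = 400:

	/*   k  = 100 * 800 / 400                             = 200
	 *   t1 = 1000 * (200 - 100)                          = 100000
	 *   a  = 1000 * (100 * 30 + 100000) / (10000 + 1000) = 9363
	 *   a  = (9363 + 5) / 10                             = 936
	 *   ah = (936 * 1000 + 5000) / 10000                 = 94
	 *   al = 936 - 94                                    = 842
	 *   *th = 1000 - 94  = 906
	 *   *tl = 1000 + 842 = 1842
	 */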
+
+bool amdgpu_is_uvd_state(u32 class, u32 class2)
+{
+	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+		return true;
+	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+		return true;
+	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+		return true;
+	return false;
+}
+
+bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
+{
+	switch (sensor) {
+	case THERMAL_TYPE_RV6XX:
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_SUMO:
+	case THERMAL_TYPE_NI:
+	case THERMAL_TYPE_SI:
+	case THERMAL_TYPE_CI:
+	case THERMAL_TYPE_KV:
+		return true;
+	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
+	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+		return false; /* need special handling */
+	case THERMAL_TYPE_NONE:
+	case THERMAL_TYPE_EXTERNAL:
+	case THERMAL_TYPE_EXTERNAL_GPIO:
+	default:
+		return false;
+	}
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+	struct _ATOM_PPLIB_FANTABLE fan;
+	struct _ATOM_PPLIB_FANTABLE2 fan2;
+	struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
+					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
+{
+	u32 size = atom_table->ucNumEntries *
+		sizeof(struct amdgpu_clock_voltage_dependency_entry);
+	int i;
+	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
+
+	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
+	if (!amdgpu_table->entries)
+		return -ENOMEM;
+
+	entry = &atom_table->entries[0];
+	for (i = 0; i < atom_table->ucNumEntries; i++) {
+		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+			(entry->ucClockHigh << 16);
+		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
+	}
+	amdgpu_table->count = atom_table->ucNumEntries;
+
+	return 0;
+}
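
Each dependency record stores its clock as a little-endian 16-bit low word plus an 8-bit high byte, which the loop above reassembles. As an assumed example entry:

	/* usClockLow = 0x86A0, ucClockHigh = 0x01
	 *   clk = 0x86A0 | (0x01 << 16) = 0x186A0 = 100000
	 * The PPLib tables typically express clocks in 10 kHz units, so this
	 * would correspond to a 1 GHz clock; usVoltage is used as-is
	 * (typically millivolts).
	 */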
+
+int amdgpu_get_platform_caps(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+	return 0;
+}
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
+
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	union power_info *power_info;
+	union fan_info *fan_info;
+	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	int ret, i;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	/* fan table */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+		if (power_info->pplib3.usFanTableOffset) {
+			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
+						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
+			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
+			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
+			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
+			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
+			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
+			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
+			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
+			if (fan_info->fan.ucFanTableFormat >= 2)
+				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
+			else
+				adev->pm.dpm.fan.t_max = 10900;
+			adev->pm.dpm.fan.cycle_delay = 100000;
+			if (fan_info->fan.ucFanTableFormat >= 3) {
+				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+				adev->pm.dpm.fan.default_max_fan_pwm =
+					le16_to_cpu(fan_info->fan3.usFanPWMMax);
+				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+				adev->pm.dpm.fan.fan_output_sensitivity =
+					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+			}
+			adev->pm.dpm.fan.ucode_fan_control = true;
+		}
+	}
+
+	/* clock dependency tables, shedding tables */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
+		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+								 dep_table);
+			if (ret) {
+				amdgpu_free_extended_power_table(adev);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+								 dep_table);
+			if (ret) {
+				amdgpu_free_extended_power_table(adev);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+								 dep_table);
+			if (ret) {
+				amdgpu_free_extended_power_table(adev);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+								 dep_table);
+			if (ret) {
+				amdgpu_free_extended_power_table(adev);
+				return ret;
+			}
+		}
+		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
+			if (clk_v->ucNumEntries) {
+				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
+					le16_to_cpu(clk_v->entries[0].usSclkLow) |
+					(clk_v->entries[0].ucSclkHigh << 16);
+				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
+					le16_to_cpu(clk_v->entries[0].usMclkLow) |
+					(clk_v->entries[0].ucMclkHigh << 16);
+				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
+					le16_to_cpu(clk_v->entries[0].usVddc);
+				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
+					le16_to_cpu(clk_v->entries[0].usVddci);
+			}
+		}
+		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
+			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
+				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
+
+			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
+				kzalloc(psl->ucNumEntries *
+					sizeof(struct amdgpu_phase_shedding_limits_entry),
+					GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+
+			entry = &psl->entries[0];
+			for (i = 0; i < psl->ucNumEntries; i++) {
+				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
+					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
+				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
+					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
+				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
+			}
+			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
+				psl->ucNumEntries;
+		}
+	}
+
+	/* cac data */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
+		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
+		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
+		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
+		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
+		if (adev->pm.dpm.tdp_od_limit)
+			adev->pm.dpm.power_control = true;
+		else
+			adev->pm.dpm.power_control = false;
+		adev->pm.dpm.tdp_adjustment = 0;
+		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
+		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
+		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
+		if (power_info->pplib5.usCACLeakageTableOffset) {
+			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
+				(ATOM_PPLIB_CAC_Leakage_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+			ATOM_PPLIB_CAC_Leakage_Record *entry;
+			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
+			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			entry = &cac_table->entries[0];
+			for (i = 0; i < cac_table->ucNumEntries; i++) {
+				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+						le16_to_cpu(entry->usVddc1);
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+						le16_to_cpu(entry->usVddc2);
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+						le16_to_cpu(entry->usVddc3);
+				} else {
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+						le16_to_cpu(entry->usVddc);
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+						le32_to_cpu(entry->ulLeakageValue);
+				}
+				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
+			}
+			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
+		}
+	}
+
+	/* ext tables */
+	if (le16_to_cpu(power_info->pplib.usTableSize) >=
+	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+			ext_hdr->usVCETableOffset) {
+			VCEClockInfoArray *array = (VCEClockInfoArray *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+			ATOM_PPLIB_VCE_State_Table *states =
+				(ATOM_PPLIB_VCE_State_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
+				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
+			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+			ATOM_PPLIB_VCE_State_Record *state_entry;
+			VCEClockInfo *vce_clk;
+			u32 size = limits->numEntries *
+				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
+			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			state_entry = &states->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				vce_clk = (VCEClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+			}
+			for (i = 0; i < states->numEntries; i++) {
+				if (i >= AMDGPU_MAX_VCE_LEVELS)
+					break;
+				vce_clk = (VCEClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+				adev->pm.dpm.vce_states[i].evclk =
+					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+				adev->pm.dpm.vce_states[i].ecclk =
+					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+				adev->pm.dpm.vce_states[i].clk_idx =
+					state_entry->ucClockInfoIndex & 0x3f;
+				adev->pm.dpm.vce_states[i].pstate =
+					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
+				state_entry = (ATOM_PPLIB_VCE_State_Record *)
+					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+			ext_hdr->usUVDTableOffset) {
+			UVDClockInfoArray *array = (UVDClockInfoArray *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
+			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+			u32 size = limits->numEntries *
+				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
+			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				UVDClockInfo *uvd_clk = (UVDClockInfo *)
+					((u8 *)&array->entries[0] +
+					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+			ext_hdr->usSAMUTableOffset) {
+			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+			u32 size = limits->numEntries *
+				sizeof(struct amdgpu_clock_voltage_dependency_entry);
+			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
+		    ext_hdr->usPPMTableOffset) {
+			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usPPMTableOffset));
+			adev->pm.dpm.dyn_state.ppm_table =
+				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.ppm_table) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+				le16_to_cpu(ppm->usCpuCoreNumber);
+			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
+				le32_to_cpu(ppm->ulPlatformTDP);
+			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
+				le32_to_cpu(ppm->ulSmallACPlatformTDP);
+			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
+				le32_to_cpu(ppm->ulPlatformTDC);
+			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
+				le32_to_cpu(ppm->ulSmallACPlatformTDC);
+			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
+				le32_to_cpu(ppm->ulApuTDP);
+			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
+				le32_to_cpu(ppm->ulDGpuTDP);
+			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
+				le32_to_cpu(ppm->ulDGpuUlvPower);
+			adev->pm.dpm.dyn_state.ppm_table->tj_max =
+				le32_to_cpu(ppm->ulTjmax);
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+			ext_hdr->usACPTableOffset) {
+			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+			u32 size = limits->numEntries *
+				sizeof(struct amdgpu_clock_voltage_dependency_entry);
+			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+				kzalloc(size, GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+				limits->numEntries;
+			entry = &limits->entries[0];
+			for (i = 0; i < limits->numEntries; i++) {
+				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+					le16_to_cpu(entry->usVoltage);
+				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+			}
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+			ext_hdr->usPowerTuneTableOffset) {
+			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+			ATOM_PowerTune_Table *pt;
+			adev->pm.dpm.dyn_state.cac_tdp_table =
+				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
+			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
+				amdgpu_free_extended_power_table(adev);
+				return -ENOMEM;
+			}
+			if (rev > 0) {
+				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+					ppt->usMaximumPowerDeliveryLimit;
+				pt = &ppt->power_tune_table;
+			} else {
+				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+				pt = &ppt->power_tune_table;
+			}
+			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+				le16_to_cpu(pt->usConfigurableTDP);
+			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+				le16_to_cpu(pt->usBatteryPowerLimit);
+			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+				le16_to_cpu(pt->usSmallPowerLimit);
+			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+				le16_to_cpu(pt->usLowCACLeakage);
+			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+				le16_to_cpu(pt->usHighCACLeakage);
+		}
+		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
+				ext_hdr->usSclkVddgfxTableOffset) {
+			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
+			ret = amdgpu_parse_clk_voltage_dep_table(
+					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
+					dep_table);
+			if (ret) {
+				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
+
+	kfree(dyn_state->vddc_dependency_on_sclk.entries);
+	kfree(dyn_state->vddci_dependency_on_mclk.entries);
+	kfree(dyn_state->vddc_dependency_on_mclk.entries);
+	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+	kfree(dyn_state->cac_leakage_table.entries);
+	kfree(dyn_state->phase_shedding_limits_table.entries);
+	kfree(dyn_state->ppm_table);
+	kfree(dyn_state->cac_tdp_table);
+	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
+	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
+}
+
+static const char *pp_lib_thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"RV6xx",
+	"RV770",
+	"adt7473",
+	"NONE",
+	"External GPIO",
+	"Evergreen",
+	"emc2103",
+	"Sumo",
+	"Northern Islands",
+	"Southern Islands",
+	"lm96163",
+	"Sea Islands",
+	"Kaveri/Kabini",
+};
+
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	ATOM_PPLIB_POWERPLAYTABLE *power_table;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	ATOM_PPLIB_THERMALCONTROLLER *controller;
+	struct amdgpu_i2c_bus_rec i2c_bus;
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return;
+	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
+		(mode_info->atom_context->bios + data_offset);
+	controller = &power_table->sThermalController;
+
+	/* add the i2c bus for thermal/fan chip */
+	if (controller->ucType > 0) {
+		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+			adev->pm.no_fan = true;
+		adev->pm.fan_pulses_per_revolution =
+			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+		if (adev->pm.fan_pulses_per_revolution) {
+			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
+			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+		}
+		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+			DRM_INFO("External GPIO thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+		} else if (controller->ucType ==
+			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+		} else if (controller->ucType ==
+			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+				 pp_lib_thermal_controller_names[controller->ucType],
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
+			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
+			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
+			if (adev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = pp_lib_thermal_controller_names[controller->ucType];
+				info.addr = controller->ucI2cAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
+			}
+		} else {
+			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+				 controller->ucType,
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		}
+	}
+}
+
+enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
+						 u32 sys_mask,
+						 enum amdgpu_pcie_gen asic_gen,
+						 enum amdgpu_pcie_gen default_gen)
+{
+	switch (asic_gen) {
+	case AMDGPU_PCIE_GEN1:
+		return AMDGPU_PCIE_GEN1;
+	case AMDGPU_PCIE_GEN2:
+		return AMDGPU_PCIE_GEN2;
+	case AMDGPU_PCIE_GEN3:
+		return AMDGPU_PCIE_GEN3;
+	default:
+		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+			return AMDGPU_PCIE_GEN3;
+		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+			return AMDGPU_PCIE_GEN2;
+		else
+			return AMDGPU_PCIE_GEN1;
+	}
+	return AMDGPU_PCIE_GEN1;
+}
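
Note how the fallback works when the ASIC does not force a specific generation: the default case only honors default_gen if the system mask also advertises the matching speed. For example (assumed inputs), default_gen = AMDGPU_PCIE_GEN3 with DRM_PCIE_SPEED_80 set resolves to GEN3, while the same default without 8.0 GT/s support drops straight to GEN1 rather than GEN2, since GEN2 is only returned when GEN2 was itself the default.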
+
+u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
+				 u16 asic_lanes,
+				 u16 default_lanes)
+{
+	switch (asic_lanes) {
+	case 0:
+	default:
+		return default_lanes;
+	case 1:
+		return 1;
+	case 2:
+		return 2;
+	case 4:
+		return 4;
+	case 8:
+		return 8;
+	case 12:
+		return 12;
+	case 16:
+		return 16;
+	}
+}
+
+u8 amdgpu_encode_pci_lane_width(u32 lanes)
+{
+	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+
+	if (lanes > 16)
+		return 0;
+
+	return encoded_lanes[lanes];
+}
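
The lookup table above maps a physical lane count onto the compact link-width encoding used elsewhere in the driver: amdgpu_encode_pci_lane_width(1) yields 1, (8) yields 4 and (16) yields 6, while anything over 16 lanes, or an unsupported width such as 3, encodes as 0.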

+ 85 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h

@@ -0,0 +1,85 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DPM_H__
+#define __AMDGPU_DPM_H__
+
+#define R600_SSTU_DFLT                               0
+#define R600_SST_DFLT                                0x00C8
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum amdgpu_td {
+	AMDGPU_TD_AUTO,
+	AMDGPU_TD_UP,
+	AMDGPU_TD_DOWN,
+};
+
+enum amdgpu_display_watermark {
+	AMDGPU_DISPLAY_WATERMARK_LOW = 0,
+	AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum amdgpu_display_gap
+{
+    AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+    AMDGPU_PM_DISPLAY_GAP_VBLANK       = 1,
+    AMDGPU_PM_DISPLAY_GAP_WATERMARK    = 2,
+    AMDGPU_PM_DISPLAY_GAP_IGNORE       = 3,
+};
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2);
+void amdgpu_dpm_print_cap_info(u32 caps);
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+				struct amdgpu_ps *rps);
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+bool amdgpu_is_uvd_state(u32 class, u32 class2);
+void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+			      u32 *p, u32 *u);
+int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
+
+bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
+
+int amdgpu_get_platform_caps(struct amdgpu_device *adev);
+
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
+
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
+
+enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
+						 u32 sys_mask,
+						 enum amdgpu_pcie_gen asic_gen,
+						 enum amdgpu_pcie_gen default_gen);
+
+u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
+				 u16 asic_lanes,
+				 u16 default_lanes);
+u8 amdgpu_encode_pci_lane_width(u32 lanes);
+
+#endif

+ 545 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -0,0 +1,545 @@
+/**
+ * \file amdgpu_drv.c
+ * AMD Amdgpu driver
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_gem.h>
+#include "amdgpu_drv.h"
+
+#include <drm/drm_pciids.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drm_crtc_helper.h"
+
+#include "amdgpu.h"
+#include "amdgpu_irq.h"
+
+/*
+ * KMS wrapper.
+ * - 3.0.0 - initial driver
+ */
+#define KMS_DRIVER_MAJOR	3
+#define KMS_DRIVER_MINOR	0
+#define KMS_DRIVER_PATCHLEVEL	0
+
+int amdgpu_vram_limit = 0;
+int amdgpu_gart_size = -1; /* auto */
+int amdgpu_benchmarking = 0;
+int amdgpu_testing = 0;
+int amdgpu_audio = -1;
+int amdgpu_disp_priority = 0;
+int amdgpu_hw_i2c = 0;
+int amdgpu_pcie_gen2 = -1;
+int amdgpu_msi = -1;
+int amdgpu_lockup_timeout = 10000;
+int amdgpu_dpm = -1;
+int amdgpu_smc_load_fw = 1;
+int amdgpu_aspm = -1;
+int amdgpu_runtime_pm = -1;
+int amdgpu_hard_reset = 0;
+unsigned amdgpu_ip_block_mask = 0xffffffff;
+int amdgpu_bapm = -1;
+int amdgpu_deep_color = 0;
+int amdgpu_vm_size = 8;
+int amdgpu_vm_block_size = -1;
+int amdgpu_exp_hw_support = 0;
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
+module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
+module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, amdgpu_testing, int, 0444);
+
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(audio, amdgpu_audio, int, 0444);
+
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
+
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
+
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, amdgpu_msi, int, 0444);
+
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+
+MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(dpm, amdgpu_dpm, int, 0444);
+
+MODULE_PARM_DESC(smc_load_fw, "SMC firmware loading (1 = enable, 0 = disable)");
+module_param_named(smc_load_fw, amdgpu_smc_load_fw, int, 0444);
+
+MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(aspm, amdgpu_aspm, int, 0444);
+
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
+
+MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
+module_param_named(hard_reset, amdgpu_hard_reset, int, 0444);
+
+MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
+module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+
+MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(bapm, amdgpu_bapm, int, 0444);
+
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, amdgpu_deep_color, int, 0444);
+
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
+module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
+module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
+
+MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
+module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
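
All of the knobs above can be set when loading the module, or on the kernel command line with the amdgpu. prefix. A hypothetical invocation enabling experimental ASIC support with a larger VM address space would be "modprobe amdgpu exp_hw_support=1 vm_size=16", or "amdgpu.exp_hw_support=1 amdgpu.vm_size=16" for a built-in driver.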
+
+static struct pci_device_id pciidlist[] = {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	/* Kaveri */
+	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	/* Bonaire */
+	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
+	/* Hawaii */
+	{0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
+	/* Kabini */
+	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	/* mullins */
+	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+#endif
+	/* topaz */
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	/* tonga */
+	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x6928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x6929, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x692B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x692F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	/* carrizo */
+	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+
+	{0, 0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static struct drm_driver kms_driver;
+
+static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
+	kfree(ap);
+
+	return 0;
+}
+
+static int amdgpu_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	unsigned long flags = ent->driver_data;
+	int ret;
+
+	if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
+		DRM_INFO("This hardware requires experimental hardware support.\n"
+			 "See modparam exp_hw_support\n");
+		return -ENODEV;
+	}
+
+	/* Get rid of things like offb */
+	ret = amdgpu_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
+
+	return drm_get_pci_dev(pdev, ent, &kms_driver);
+}
+
+static void
+amdgpu_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int amdgpu_pmops_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return amdgpu_suspend_kms(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return amdgpu_resume_kms(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return amdgpu_suspend_kms(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return amdgpu_resume_kms(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (!amdgpu_device_is_px(drm_dev)) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	drm_kms_helper_poll_disable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
+	ret = amdgpu_suspend_kms(drm_dev, false, false);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_ignore_hotplug(pdev);
+	pci_set_power_state(pdev, PCI_D3cold);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+	return 0;
+}
+
+static int amdgpu_pmops_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (!amdgpu_device_is_px(drm_dev))
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = amdgpu_resume_kms(drm_dev, false, false);
+	drm_kms_helper_poll_enable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	return 0;
+}
+
+static int amdgpu_pmops_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct drm_crtc *crtc;
+
+	if (!amdgpu_device_is_px(drm_dev)) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
+
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		if (crtc->enabled) {
+			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+	return 1;
+}
+
+long amdgpu_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	long ret;
+	dev = file_priv->minor->dev;
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+static const struct dev_pm_ops amdgpu_pm_ops = {
+	.suspend = amdgpu_pmops_suspend,
+	.resume = amdgpu_pmops_resume,
+	.freeze = amdgpu_pmops_freeze,
+	.thaw = amdgpu_pmops_thaw,
+	.poweroff = amdgpu_pmops_freeze,
+	.restore = amdgpu_pmops_resume,
+	.runtime_suspend = amdgpu_pmops_runtime_suspend,
+	.runtime_resume = amdgpu_pmops_runtime_resume,
+	.runtime_idle = amdgpu_pmops_runtime_idle,
+};
+
+static const struct file_operations amdgpu_driver_kms_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = amdgpu_drm_ioctl,
+	.mmap = amdgpu_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = amdgpu_kms_compat_ioctl,
+#endif
+};
+
+static struct drm_driver kms_driver = {
+	.driver_features =
+	    DRIVER_USE_AGP |
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+	    DRIVER_PRIME | DRIVER_RENDER,
+	.dev_priv_size = 0,
+	.load = amdgpu_driver_load_kms,
+	.open = amdgpu_driver_open_kms,
+	.preclose = amdgpu_driver_preclose_kms,
+	.postclose = amdgpu_driver_postclose_kms,
+	.lastclose = amdgpu_driver_lastclose_kms,
+	.set_busid = drm_pci_set_busid,
+	.unload = amdgpu_driver_unload_kms,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
+	.enable_vblank = amdgpu_enable_vblank_kms,
+	.disable_vblank = amdgpu_disable_vblank_kms,
+	.get_vblank_timestamp = amdgpu_get_vblank_timestamp_kms,
+	.get_scanout_position = amdgpu_get_crtc_scanoutpos,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = amdgpu_debugfs_init,
+	.debugfs_cleanup = amdgpu_debugfs_cleanup,
+#endif
+	.irq_preinstall = amdgpu_irq_preinstall,
+	.irq_postinstall = amdgpu_irq_postinstall,
+	.irq_uninstall = amdgpu_irq_uninstall,
+	.irq_handler = amdgpu_irq_handler,
+	.ioctls = amdgpu_ioctls_kms,
+	.gem_free_object = amdgpu_gem_object_free,
+	.gem_open_object = amdgpu_gem_object_open,
+	.gem_close_object = amdgpu_gem_object_close,
+	.dumb_create = amdgpu_mode_dumb_create,
+	.dumb_map_offset = amdgpu_mode_dumb_mmap,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.fops = &amdgpu_driver_kms_fops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = amdgpu_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = amdgpu_gem_prime_pin,
+	.gem_prime_unpin = amdgpu_gem_prime_unpin,
+	.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
+	.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
+	.gem_prime_vmap = amdgpu_gem_prime_vmap,
+	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = KMS_DRIVER_MAJOR,
+	.minor = KMS_DRIVER_MINOR,
+	.patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+static struct pci_driver amdgpu_kms_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = amdgpu_pci_probe,
+	.remove = amdgpu_pci_remove,
+	.driver.pm = &amdgpu_pm_ops,
+};
+
+static int __init amdgpu_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force()) {
+		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+		return -EINVAL;
+	}
+#endif
+	DRM_INFO("amdgpu kernel modesetting enabled.\n");
+	driver = &kms_driver;
+	pdriver = &amdgpu_kms_pci_driver;
+	driver->driver_features |= DRIVER_MODESET;
+	driver->num_ioctls = amdgpu_max_kms_ioctl;
+	amdgpu_register_atpx_handler();
+
+	/* let modprobe override vga console setting */
+	return drm_pci_init(driver, pdriver);
+}
+
+static void __exit amdgpu_exit(void)
+{
+	drm_pci_exit(driver, pdriver);
+	amdgpu_unregister_atpx_handler();
+}
+
+module_init(amdgpu_init);
+module_exit(amdgpu_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");

+ 48 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h

@@ -0,0 +1,48 @@
+/* amdgpu_drv.h -- Private header for amdgpu driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DRV_H__
+#define __AMDGPU_DRV_H__
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "amdgpu_family.h"
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"AMD linux driver team"
+
+#define DRIVER_NAME		"amdgpu"
+#define DRIVER_DESC		"AMD GPU"
+#define DRIVER_DATE		"20150101"
+
+long amdgpu_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg);
+
+#endif

+ 245 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c

@@ -0,0 +1,245 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_connectors.h"
+#include "atom.h"
+#include "atombios_encoders.h"
+
+void
+amdgpu_link_encoder_connector(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+	struct drm_encoder *encoder;
+	struct amdgpu_encoder *amdgpu_encoder;
+
+	/* walk the list and link encoders to connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		amdgpu_connector = to_amdgpu_connector(connector);
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			amdgpu_encoder = to_amdgpu_encoder(encoder);
+			if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+				drm_mode_connector_attach_encoder(connector, encoder);
+				if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+					amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
+					adev->mode_info.bl_encoder = amdgpu_encoder;
+				}
+			}
+		}
+	}
+}
+
+void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+			amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
+			DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+				  amdgpu_encoder->active_device, amdgpu_encoder->devices,
+				  amdgpu_connector->devices, encoder->encoder_type);
+		}
+	}
+}
+
+struct drm_connector *
+amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		amdgpu_connector = to_amdgpu_connector(connector);
+		if (amdgpu_encoder->active_device & amdgpu_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_connector *
+amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		amdgpu_connector = to_amdgpu_connector(connector);
+		if (amdgpu_encoder->devices & amdgpu_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_encoder *other_encoder;
+	struct amdgpu_encoder *other_amdgpu_encoder;
+
+	if (amdgpu_encoder->is_ext_encoder)
+		return NULL;
+
+	list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+		if (other_encoder == encoder)
+			continue;
+		other_amdgpu_encoder = to_amdgpu_encoder(other_encoder);
+		if (other_amdgpu_encoder->is_ext_encoder &&
+		    (amdgpu_encoder->devices & other_amdgpu_encoder->devices))
+			return other_encoder;
+	}
+	return NULL;
+}
+
+u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
+{
+	struct drm_encoder *other_encoder = amdgpu_get_external_encoder(encoder);
+
+	if (other_encoder) {
+		struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(other_encoder);
+
+		switch (amdgpu_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return amdgpu_encoder->encoder_id;
+		default:
+			return ENCODER_OBJECT_ID_NONE;
+		}
+	}
+	return ENCODER_OBJECT_ID_NONE;
+}
+
+void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+	unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+	unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+	unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+	unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+	unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+	unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
+	adjusted_mode->clock = native_mode->clock;
+	adjusted_mode->flags = native_mode->flags;
+
+	adjusted_mode->hdisplay = native_mode->hdisplay;
+	adjusted_mode->vdisplay = native_mode->vdisplay;
+
+	adjusted_mode->htotal = native_mode->hdisplay + hblank;
+	adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+	adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+	adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+	adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+	adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+	adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+	adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+
+	adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+	adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+	adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+	adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+	adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+	adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+
+}
+
+bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock)
+{
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+	struct amdgpu_connector_atom_dig *dig_connector;
+
+	connector = amdgpu_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = amdgpu_get_connector_for_encoder_init(encoder);
+	amdgpu_connector = to_amdgpu_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (amdgpu_connector->use_digital) {
+			/* HDMI 1.3 supports up to 340 MHz over single link */
+			if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+				if (pixel_clock > 340000)
+					return true;
+				else
+					return false;
+			} else {
+				if (pixel_clock > 165000)
+					return true;
+				else
+					return false;
+			}
+		} else
+			return false;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = amdgpu_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return false;
+		else {
+			/* HDMI 1.3 supports up to 340 MHz over single link */
+			if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+				if (pixel_clock > 340000)
+					return true;
+				else
+					return false;
+			} else {
+				if (pixel_clock > 165000)
+					return true;
+				else
+					return false;
+			}
+		}
+	default:
+		return false;
+	}
+}
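The switch above boils down to two pixel-clock thresholds: single-link TMDS tops out at 165 MHz, while sinks that advertise HDMI 1.3 can be driven single link up to 340 MHz. A minimal sketch of that decision in isolation (illustrative only, not part of the patch; the helper name is hypothetical):

	static bool needs_dual_link(bool sink_is_hdmi_1_3, u32 pixel_clock_khz)
	{
		/* 340000 kHz for HDMI 1.3 capable sinks, 165000 kHz otherwise */
		u32 single_link_limit_khz = sink_is_hdmi_1_3 ? 340000 : 165000;

		return pixel_clock_khz > single_link_limit_khz;
	}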

+ 62 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_family.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+/* this file defines the CHIP_  and family flags used in the pciids,
+ * it is common between kms and non-kms because duplicating it and
+ * then updating only one copy is a recipe for bugs.
+ */
+#ifndef AMDGPU_FAMILY_H
+#define AMDGPU_FAMILY_H
+/*
+ * Supported ASIC types
+ */
+enum amdgpu_asic_type {
+	CHIP_BONAIRE = 0,
+	CHIP_KAVERI,
+	CHIP_KABINI,
+	CHIP_HAWAII,
+	CHIP_MULLINS,
+	CHIP_TOPAZ,
+	CHIP_TONGA,
+	CHIP_CARRIZO,
+	CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum amdgpu_chip_flags {
+	AMDGPU_ASIC_MASK = 0x0000ffffUL,
+	AMDGPU_FLAGS_MASK  = 0xffff0000UL,
+	AMDGPU_IS_MOBILITY = 0x00010000UL,
+	AMDGPU_IS_APU      = 0x00020000UL,
+	AMDGPU_IS_PX       = 0x00040000UL,
+	AMDGPU_EXP_HW_SUPPORT = 0x00080000UL,
+};
+
+#endif

+ 421 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c

@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "cikd.h"
+
+#include <drm/drm_fb_helper.h>
+
+#include <linux/vga_switcheroo.h>
+
+/* object hierarchy -
+   this contains a helper + an amdgpu fb
+   the helper contains a pointer to the amdgpu framebuffer baseclass.
+*/
+struct amdgpu_fbdev {
+	struct drm_fb_helper helper;
+	struct amdgpu_framebuffer rfb;
+	struct list_head fbdev_list;
+	struct amdgpu_device *adev;
+};
+
+static struct fb_ops amdgpufb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
+{
+	int aligned = width;
+	int pitch_mask = 0;
+
+	switch (bpp / 8) {
+	case 1:
+		pitch_mask = 255;
+		break;
+	case 2:
+		pitch_mask = 127;
+		break;
+	case 3:
+	case 4:
+		pitch_mask = 63;
+		break;
+	}
+
+	aligned += pitch_mask;
+	aligned &= ~pitch_mask;
+	return aligned;
+}
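A worked example of the alignment above (illustrative, not part of the patch): at 32 bpp the pitch mask is 63, so a 1366-pixel-wide mode is rounded up to the next multiple of 64 pixels before the byte pitch is computed.

	int aligned = (1366 + 63) & ~63;	/* = 1408 pixels */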
+
+static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+	int ret;
+
+	ret = amdgpu_bo_reserve(rbo, false);
+	if (likely(ret == 0)) {
+		amdgpu_bo_kunmap(rbo);
+		amdgpu_bo_unpin(rbo);
+		amdgpu_bo_unreserve(rbo);
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+}
+
+static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+					 struct drm_mode_fb_cmd2 *mode_cmd,
+					 struct drm_gem_object **gobj_p)
+{
+	struct amdgpu_device *adev = rfbdev->adev;
+	struct drm_gem_object *gobj = NULL;
+	struct amdgpu_bo *rbo = NULL;
+	bool fb_tiled = false; /* useful for testing */
+	u32 tiling_flags = 0;
+	int ret;
+	int aligned_size, size;
+	int height = mode_cmd->height;
+	u32 bpp, depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	/* need to align pitch with crtc limits */
+	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
+						  fb_tiled) * ((bpp + 1) / 8);
+
+	height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitches[0] * height;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
+				       AMDGPU_GEM_DOMAIN_VRAM,
+				       0, true,
+				       &gobj);
+	if (ret) {
+		printk(KERN_ERR "failed to allocate framebuffer (%d bytes)\n",
+		       aligned_size);
+		return -ENOMEM;
+	}
+	rbo = gem_to_amdgpu_bo(gobj);
+
+	if (fb_tiled)
+		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
+
+	ret = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	if (tiling_flags) {
+		ret = amdgpu_bo_set_tiling_flags(rbo,
+						 tiling_flags);
+		if (ret)
+			dev_err(adev->dev, "FB failed to set tiling flags\n");
+	}
+
+
+	ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+	if (ret) {
+		amdgpu_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	ret = amdgpu_bo_kmap(rbo, NULL);
+	amdgpu_bo_unreserve(rbo);
+	if (ret) {
+		goto out_unref;
+	}
+
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	amdgpufb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
+
+static int amdgpufb_create(struct drm_fb_helper *helper,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
+	struct amdgpu_device *adev = rfbdev->adev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct amdgpu_bo *rbo = NULL;
+	struct device *device = &adev->pdev->dev;
+	int ret;
+	unsigned long tmp;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon object %d\n", ret);
+		return ret;
+	}
+
+	rbo = gem_to_amdgpu_bo(gobj);
+
+	/* okay we have an object now allocate the framebuffer */
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->par = rfbdev;
+
+	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+	if (ret) {
+		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
+		goto out_unref;
+	}
+
+	fb = &rfbdev->rfb.base;
+
+	/* setup helper */
+	rfbdev->helper.fb = fb;
+	rfbdev->helper.fbdev = info;
+
+	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+
+	strcpy(info->fix.id, "amdgpudrmfb");
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &amdgpufb_ops;
+
+	tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+	info->fix.smem_start = adev->mc.aper_base + tmp;
+	info->fix.smem_len = amdgpu_bo_size(rbo);
+	info->screen_base = rbo->kptr;
+	info->screen_size = amdgpu_bo_size(rbo);
+
+	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].size = adev->mc.aper_size;
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	if (info->screen_base == NULL) {
+		ret = -ENOSPC;
+		goto out_unref;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
+	DRM_INFO("vram aperture at 0x%lX\n",  (unsigned long)adev->mc.aper_base);
+	DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+	DRM_INFO("fb depth is %d\n", fb->depth);
+	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+
+	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
+	return 0;
+
+out_unref:
+	if (rbo) {
+
+	}
+	if (fb && ret) {
+		drm_gem_object_unreference(gobj);
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_cleanup(fb);
+		kfree(fb);
+	}
+	return ret;
+}
+
+void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
+{
+	if (adev->mode_info.rfbdev)
+		drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
+}
+
+static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
+{
+	struct fb_info *info;
+	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+
+	if (rfbdev->helper.fbdev) {
+		info = rfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (rfb->obj) {
+		amdgpufb_destroy_pinned_object(rfb->obj);
+		rfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&rfbdev->helper);
+	drm_framebuffer_unregister_private(&rfb->base);
+	drm_framebuffer_cleanup(&rfb->base);
+
+	return 0;
+}
+
+/** Sets the color ramps on behalf of fbcon */
+static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				      u16 blue, int regno)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	amdgpu_crtc->lut_r[regno] = red >> 6;
+	amdgpu_crtc->lut_g[regno] = green >> 6;
+	amdgpu_crtc->lut_b[regno] = blue >> 6;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				      u16 *blue, int regno)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	*red = amdgpu_crtc->lut_r[regno] << 6;
+	*green = amdgpu_crtc->lut_g[regno] << 6;
+	*blue = amdgpu_crtc->lut_b[regno] << 6;
+}
+
+static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
+	.gamma_set = amdgpu_crtc_fb_gamma_set,
+	.gamma_get = amdgpu_crtc_fb_gamma_get,
+	.fb_probe = amdgpufb_create,
+};
+
+int amdgpu_fbdev_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_fbdev *rfbdev;
+	int bpp_sel = 32;
+	int ret;
+
+	/* don't init fbdev on hw without DCE */
+	if (!adev->mode_info.mode_config_initialized)
+		return 0;
+
+	/* select 8 bpp console on low vram cards */
+	if (adev->mc.real_vram_size <= (32*1024*1024))
+		bpp_sel = 8;
+
+	rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
+	if (!rfbdev)
+		return -ENOMEM;
+
+	rfbdev->adev = adev;
+	adev->mode_info.rfbdev = rfbdev;
+
+	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
+			&amdgpu_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
+				 adev->mode_info.num_crtc,
+				 AMDGPUFB_CONN_LIMIT);
+	if (ret) {
+		kfree(rfbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(adev->ddev);
+
+	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void amdgpu_fbdev_fini(struct amdgpu_device *adev)
+{
+	if (!adev->mode_info.rfbdev)
+		return;
+
+	amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
+	kfree(adev->mode_info.rfbdev);
+	adev->mode_info.rfbdev = NULL;
+}
+
+void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
+{
+	if (adev->mode_info.rfbdev)
+		fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
+{
+	struct amdgpu_bo *robj;
+	int size = 0;
+
+	if (!adev->mode_info.rfbdev)
+		return 0;
+
+	robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
+	size += amdgpu_bo_size(robj);
+	return size;
+}
+
+bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
+{
+	if (!adev->mode_info.rfbdev)
+		return false;
+	if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
+		return true;
+	return false;
+}

+ 1127 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

@@ -0,0 +1,1127 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Dave Airlie
+ */
+#include <linux/seq_file.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * Fences
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization.  When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+/**
+ * amdgpu_fence_write - write a fence value
+ *
+ * @ring: ring the fence is associated with
+ * @seq: sequence number to write
+ *
+ * Writes a fence value to memory (all asics).
+ */
+static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
+{
+	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+
+	if (drv->cpu_addr)
+		*drv->cpu_addr = cpu_to_le32(seq);
+}
+
+/**
+ * amdgpu_fence_read - read a fence value
+ *
+ * @ring: ring the fence is associated with
+ *
+ * Reads a fence value from memory (all asics).
+ * Returns the value of the fence read from memory.
+ */
+static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
+{
+	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+	u32 seq = 0;
+
+	if (drv->cpu_addr)
+		seq = le32_to_cpu(*drv->cpu_addr);
+	else
+		seq = lower_32_bits(atomic64_read(&drv->last_seq));
+
+	return seq;
+}
+
+/**
+ * amdgpu_fence_schedule_check - schedule lockup check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Queues a delayed work item to check for lockups.
+ */
+static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
+{
+	/*
+	 * Do not reset the timer here with mod_delayed_work,
+	 * this can livelock in an interaction with TTM delayed destroy.
+	 */
+	queue_delayed_work(system_power_efficient_wq,
+		&ring->fence_drv.lockup_work,
+		AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
+ * amdgpu_fence_emit - emit a fence on the requested ring
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @fence: amdgpu fence object
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
+		      struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	/* we are protected by the ring emission mutex */
+	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	if ((*fence) == NULL) {
+		return -ENOMEM;
+	}
+	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
+	(*fence)->ring = ring;
+	(*fence)->owner = owner;
+	fence_init(&(*fence)->base, &amdgpu_fence_ops,
+		&adev->fence_queue.lock, adev->fence_context + ring->idx,
+		(*fence)->seq);
+	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false);
+	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
+	return 0;
+}
+
+/**
+ * amdgpu_fence_check_signaled - callback from fence_queue
+ *
+ * this function is called with fence_queue lock held, which is also used
+ * for the fence locking itself, so unlocked variants are used for
+ * fence_signal, and remove_wait_queue.
+ */
+static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+{
+	struct amdgpu_fence *fence;
+	struct amdgpu_device *adev;
+	u64 seq;
+	int ret;
+
+	fence = container_of(wait, struct amdgpu_fence, fence_wake);
+	adev = fence->ring->adev;
+
+	/*
+	 * We cannot use amdgpu_fence_process here because we're already
+	 * in the waitqueue, in a call from wake_up_all.
+	 */
+	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
+	if (seq >= fence->seq) {
+		ret = fence_signal_locked(&fence->base);
+		if (!ret)
+			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+		else
+			FENCE_TRACE(&fence->base, "was already signaled\n");
+
+		amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
+				fence->ring->fence_drv.irq_type);
+		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+		fence_put(&fence->base);
+	} else
+		FENCE_TRACE(&fence->base, "pending\n");
+	return 0;
+}
+
+/**
+ * amdgpu_fence_activity - check for fence activity
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Checks the current fence value and calculates the last
+ * signaled fence value. Returns true if activity occurred
+ * on the ring and the fence_queue should be woken up.
+ */
+static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
+{
+	uint64_t seq, last_seq, last_emitted;
+	unsigned count_loop = 0;
+	bool wake = false;
+
+	/* Note there is a scenario here for an infinite loop but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process needs to be interrupted by another process, and that
+	 * other process must update last_seq between the atomic read and
+	 * the xchg of the current process.
+	 *
+	 * Moreover, for this to turn into an infinite loop, new fences must
+	 * be signaled continuously, i.e. amdgpu_fence_read needs to return
+	 * a different value each time for both the currently polling
+	 * process and the other process that updates last_seq between the
+	 * atomic read and xchg of the current process. And the value the
+	 * other process sets as last_seq must be higher than the seq value
+	 * we just read, which means the current process must be interrupted
+	 * after amdgpu_fence_read and before the atomic xchg.
+	 *
+	 * To be extra safe we count the number of times we loop and bail
+	 * out after 10 iterations, accepting that we might have temporarily
+	 * set last_seq to an older value rather than the true latest one.
+	 */
+	last_seq = atomic64_read(&ring->fence_drv.last_seq);
+	do {
+		last_emitted = ring->fence_drv.sync_seq[ring->idx];
+		seq = amdgpu_fence_read(ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
+		}
+
+		if (seq <= last_seq || seq > last_emitted) {
+			break;
+		}
+		/* If we loop over we don't want to return without
+		 * checking if a fence is signaled, as it means that the
+		 * seq we just read is different from the previous one.
+		 */
+		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped too many times; leave with the
+			 * fact that we might have set an older fence
+			 * seq than the current real last seq as signaled
+			 * by the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
+
+	if (seq < last_emitted)
+		amdgpu_fence_schedule_check(ring);
+
+	return wake;
+}
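The sequence extension in the loop above can be followed with hypothetical numbers (not from the patch). The hardware writes only the low 32 bits of the sequence, so the upper half is first borrowed from last_seq; if the result is smaller than last_seq the 32-bit counter wrapped and the upper half is taken from last_emitted instead:

	uint64_t last_seq     = 0x1fffffff0ULL;   /* last value seen by the CPU         */
	uint64_t last_emitted = 0x200000010ULL;   /* last value emitted to the ring     */
	uint64_t seq          = 0x5;              /* 32-bit value read back from memory */

	seq |= last_seq & 0xffffffff00000000ULL;  /* 0x100000005 */
	if (seq < last_seq)                       /* the 32-bit counter wrapped */
		seq = (seq & 0xffffffff) |
		      (last_emitted & 0xffffffff00000000ULL);  /* 0x200000005 */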
+
+/**
+ * amdgpu_fence_check_lockup - check for hardware lockup
+ *
+ * @work: delayed work item
+ *
+ * Checks for fence activity and, if there is none, probes
+ * the hardware to see whether a lockup occurred.
+ */
+static void amdgpu_fence_check_lockup(struct work_struct *work)
+{
+	struct amdgpu_fence_driver *fence_drv;
+	struct amdgpu_ring *ring;
+
+	fence_drv = container_of(work, struct amdgpu_fence_driver,
+				lockup_work.work);
+	ring = fence_drv->ring;
+
+	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
+		/* just reschedule the check if a reset is going on */
+		amdgpu_fence_schedule_check(ring);
+		return;
+	}
+
+	if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
+		fence_drv->delayed_irq = false;
+		amdgpu_irq_update(ring->adev, fence_drv->irq_src,
+				fence_drv->irq_type);
+	}
+
+	if (amdgpu_fence_activity(ring))
+		wake_up_all(&ring->adev->fence_queue);
+	else if (amdgpu_ring_is_lockup(ring)) {
+		/* good news we believe it's a lockup */
+		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
+			"0x%016llx last fence id 0x%016llx on ring %d)\n",
+			(uint64_t)atomic64_read(&fence_drv->last_seq),
+			fence_drv->sync_seq[ring->idx], ring->idx);
+
+		/* remember that we need a reset */
+		ring->adev->needs_reset = true;
+		wake_up_all(&ring->adev->fence_queue);
+	}
+	up_read(&ring->adev->exclusive_lock);
+}
+
+/**
+ * amdgpu_fence_process - process a fence
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void amdgpu_fence_process(struct amdgpu_ring *ring)
+{
+	uint64_t seq, last_seq, last_emitted;
+	unsigned count_loop = 0;
+	bool wake = false;
+
+	/* Note there is a scenario here for an infinite loop but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process needs to be interrupted by another process, and that
+	 * other process must update last_seq between the atomic read and
+	 * the xchg of the current process.
+	 *
+	 * Moreover, for this to turn into an infinite loop, new fences must
+	 * be signaled continuously, i.e. amdgpu_fence_read needs to return
+	 * a different value each time for both the currently polling
+	 * process and the other process that updates last_seq between the
+	 * atomic read and xchg of the current process. And the value the
+	 * other process sets as last_seq must be higher than the seq value
+	 * we just read, which means the current process must be interrupted
+	 * after amdgpu_fence_read and before the atomic xchg.
+	 *
+	 * To be extra safe we count the number of times we loop and bail
+	 * out after 10 iterations, accepting that we might have temporarily
+	 * set last_seq to an older value rather than the true latest one.
+	 */
+	last_seq = atomic64_read(&ring->fence_drv.last_seq);
+	do {
+		last_emitted = ring->fence_drv.sync_seq[ring->idx];
+		seq = amdgpu_fence_read(ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
+		}
+
+		if (seq <= last_seq || seq > last_emitted) {
+			break;
+		}
+		/* If we loop over we don't want to return without
+		 * checking if a fence is signaled, as it means that the
+		 * seq we just read is different from the previous one.
+		 */
+		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped too many times; leave with the
+			 * fact that we might have set an older fence
+			 * seq than the current real last seq as signaled
+			 * by the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
+
+	if (wake)
+		wake_up_all(&ring->adev->fence_queue);
+}
+
+/**
+ * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @ring: ring the fence is associated with
+ * @seq: sequence number
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value).  Helper function for
+ * amdgpu_fence_signaled().
+ */
+static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
+{
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return true;
+
+	/* poll new last sequence at least once */
+	amdgpu_fence_process(ring);
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return true;
+
+	return false;
+}
+
+static bool amdgpu_fence_is_signaled(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+	struct amdgpu_device *adev = ring->adev;
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
+
+	if (down_read_trylock(&adev->exclusive_lock)) {
+		amdgpu_fence_process(ring);
+		up_read(&adev->exclusive_lock);
+
+		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with fence_queue lock held, and adds a callback
+ * to fence_queue that checks if this fence is signaled, and if so it
+ * signals the fence and removes itself.
+ */
+static bool amdgpu_fence_enable_signaling(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+	struct amdgpu_device *adev = ring->adev;
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return false;
+
+	if (down_read_trylock(&adev->exclusive_lock)) {
+		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+			ring->fence_drv.irq_type);
+		if (amdgpu_fence_activity(ring))
+			wake_up_all_locked(&adev->fence_queue);
+
+		/* did fence get signaled after we enabled the sw irq? */
+		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
+			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+				ring->fence_drv.irq_type);
+			up_read(&adev->exclusive_lock);
+			return false;
+		}
+
+		up_read(&adev->exclusive_lock);
+	} else {
+		/* we're probably in a lockup, let's not fiddle too much */
+		if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
+			ring->fence_drv.irq_type))
+			ring->fence_drv.delayed_irq = true;
+		amdgpu_fence_schedule_check(ring);
+	}
+
+	fence->fence_wake.flags = 0;
+	fence->fence_wake.private = NULL;
+	fence->fence_wake.func = amdgpu_fence_check_signaled;
+	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	fence_get(f);
+	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	return true;
+}
+
+/**
+ * amdgpu_fence_signaled - check if a fence has signaled
+ *
+ * @fence: amdgpu fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
+{
+	if (!fence)
+		return true;
+
+	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
+		if (!fence_signal(&fence->base))
+			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
+ *
+ * @adev: amdgpu device pointer
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for amdgpu_fence_wait_seq.
+ */
+static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		if (!adev->rings[i] || !seq[i])
+			continue;
+
+		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
+ *
+ * @adev: amdgpu device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  The sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for amdgpu_fence_wait_*().
+ * Returns the remaining time if the sequence number has passed, 0 when
+ * the wait timed out, or an error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected.
+ */
+long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
+				   bool intr, long timeout)
+{
+	uint64_t last_seq[AMDGPU_MAX_RINGS];
+	bool signaled;
+	int i, r;
+
+	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
+
+		/* Save current sequence values, used to check for GPU lockups */
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_ring *ring = adev->rings[i];
+
+			if (!ring || !target_seq[i])
+				continue;
+
+			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
+			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
+			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
+		}
+
+		if (intr) {
+			r = wait_event_interruptible_timeout(adev->fence_queue, (
+				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
+				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
+		} else {
+			r = wait_event_timeout(adev->fence_queue, (
+				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
+				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
+		}
+
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_ring *ring = adev->rings[i];
+
+			if (!ring || !target_seq[i])
+				continue;
+
+			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
+			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
+		}
+
+		if (unlikely(r < 0))
+			return r;
+
+		if (unlikely(!signaled)) {
+
+			if (adev->needs_reset)
+				return -EDEADLK;
+
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r)
+				continue;
+
+			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+				struct amdgpu_ring *ring = adev->rings[i];
+
+				if (!ring || !target_seq[i])
+					continue;
+
+				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
+					break;
+			}
+
+			if (i != AMDGPU_MAX_RINGS)
+				continue;
+
+			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+				if (!adev->rings[i] || !target_seq[i])
+					continue;
+
+				if (amdgpu_ring_is_lockup(adev->rings[i]))
+					break;
+			}
+
+			if (i < AMDGPU_MAX_RINGS) {
+				/* good news we believe it's a lockup */
+				dev_warn(adev->dev, "GPU lockup (waiting for "
+					 "0x%016llx last fence id 0x%016llx on"
+					 " ring %d)\n",
+					 target_seq[i], last_seq[i], i);
+
+				/* remember that we need a reset */
+				adev->needs_reset = true;
+				wake_up_all(&adev->fence_queue);
+				return -EDEADLK;
+			}
+
+			if (timeout < MAX_SCHEDULE_TIMEOUT) {
+				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
+				if (timeout <= 0) {
+					return 0;
+				}
+			}
+		}
+	}
+	return timeout;
+}
+
+/**
+ * amdgpu_fence_wait - wait for a fence to signal
+ *
+ * @fence: amdgpu fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
+{
+	uint64_t seq[AMDGPU_MAX_RINGS] = {};
+	long r;
+
+	seq[fence->ring->idx] = fence->seq;
+	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		return r;
+	}
+
+	r = fence_signal(&fence->base);
+	if (!r)
+		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+	return 0;
+}
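A hedged usage sketch of the emit/wait pair defined in this file (error handling trimmed; 'ring' and 'owner' stand in for whatever the caller has at hand, and the caller is assumed to hold the ring emission mutex as amdgpu_fence_emit() expects):

	struct amdgpu_fence *fence;
	int r;

	r = amdgpu_fence_emit(ring, owner, &fence);   /* allocate and emit the fence */
	if (r)
		return r;
	r = amdgpu_fence_wait(fence, true);           /* interruptible wait for it   */
	amdgpu_fence_unref(&fence);                   /* drop our reference          */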
+
+/**
+ * amdgpu_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @adev: amdgpu device pointer
+ * @fences: amdgpu fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics).  The fence
+ * array is indexed by ring id.  @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
+int amdgpu_fence_wait_any(struct amdgpu_device *adev,
+			  struct amdgpu_fence **fences,
+			  bool intr)
+{
+	uint64_t seq[AMDGPU_MAX_RINGS];
+	unsigned i, num_rings = 0;
+	long r;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		seq[i] = 0;
+
+		if (!fences[i]) {
+			continue;
+		}
+
+		seq[i] = fences[i]->seq;
+		++num_rings;
+	}
+
+	/* nothing to wait for ? */
+	if (num_rings == 0)
+		return -ENOENT;
+
+	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_fence_wait_next - wait for the next fence to signal
+ *
+ * @ring: ring to wait on
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
+{
+	uint64_t seq[AMDGPU_MAX_RINGS] = {};
+	long r;
+
+	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
+	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
+		/* nothing to wait for, last_seq is
+		   already the last emitted fence */
+		return -ENOENT;
+	}
+	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
+	return 0;
+}
+
+/**
+ * amdgpu_fence_wait_empty - wait for all fences to signal
+ *
+ * @ring: ring to wait on
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns 0 if the fences have passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint64_t seq[AMDGPU_MAX_RINGS] = {};
+	long r;
+
+	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
+	if (!seq[ring->idx])
+		return 0;
+
+	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		if (r == -EDEADLK)
+			return -EDEADLK;
+
+		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
+			ring->idx, r);
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_fence_ref - take a ref on a fence
+ *
+ * @fence: amdgpu fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
+struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
+{
+	fence_get(&fence->base);
+	return fence;
+}
+
+/**
+ * amdgpu_fence_unref - remove a ref on a fence
+ *
+ * @fence: amdgpu fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
+void amdgpu_fence_unref(struct amdgpu_fence **fence)
+{
+	struct amdgpu_fence *tmp = *fence;
+
+	*fence = NULL;
+	if (tmp)
+		fence_put(&tmp->base);
+}
+
+/**
+ * amdgpu_fence_count_emitted - get the count of emitted fences
+ *
+ * @ring: ring the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring.  Used by the
+ * dynpm code to track ring activity.
+ */
+unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
+{
+	uint64_t emitted;
+
+	/* We are not protected by ring lock when reading the last sequence
+	 * but it's ok to report a slightly wrong fence count here.
+	 */
+	amdgpu_fence_process(ring);
+	emitted = ring->fence_drv.sync_seq[ring->idx]
+		- atomic64_read(&ring->fence_drv.last_seq);
+	/* to avoid 32-bit wrap-around */
+	if (emitted > 0x10000000)
+		emitted = 0x10000000;
+
+	return (unsigned)emitted;
+}
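With illustrative numbers (not from the patch): if the ring has emitted sequence 105 and sequence 102 is the last one signaled, three fences are outstanding; the clamp only matters if the unsigned subtraction wraps:

	uint64_t emitted = 105 - 102;     /* 3 fences still outstanding */

	if (emitted > 0x10000000)         /* guard against wrap-around  */
		emitted = 0x10000000;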
+
+/**
+ * amdgpu_fence_need_sync - do we need a semaphore
+ *
+ * @fence: amdgpu fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics).  If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
+			    struct amdgpu_ring *dst_ring)
+{
+	struct amdgpu_fence_driver *fdrv;
+
+	if (!fence)
+		return false;
+
+	if (fence->ring == dst_ring)
+		return false;
+
+	/* we are protected by the ring mutex */
+	fdrv = &dst_ring->fence_drv;
+	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
+		return false;
+
+	return true;
+}
+
+/**
+ * amdgpu_fence_note_sync - record the sync point
+ *
+ * @fence: amdgpu fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
+			    struct amdgpu_ring *dst_ring)
+{
+	struct amdgpu_fence_driver *dst, *src;
+	unsigned i;
+
+	if (!fence)
+		return;
+
+	if (fence->ring == dst_ring)
+		return;
+
+	/* we are protected by the ring mutex */
+	src = &fence->ring->fence_drv;
+	dst = &dst_ring->fence_drv;
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		if (i == dst_ring->idx)
+			continue;
+
+		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+	}
+}
+
+/**
+ * amdgpu_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @ring: ring to start the fence driver on
+ * @irq_src: interrupt source to use for this ring
+ * @irq_type: interrupt type to use for this ring
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
+int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+				   struct amdgpu_irq_src *irq_src,
+				   unsigned irq_type)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint64_t index;
+
+	if (ring != &adev->uvd.ring) {
+		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
+		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
+	} else {
+		/* put fence directly behind firmware */
+		index = ALIGN(adev->uvd.fw->size, 8);
+		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
+		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
+	}
+	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
+	ring->fence_drv.initialized = true;
+	ring->fence_drv.irq_src = irq_src;
+	ring->fence_drv.irq_type = irq_type;
+	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
+		 "cpu addr 0x%p\n", ring->idx,
+		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+	return 0;
+}
+
+/**
+ * amdgpu_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @ring: ring to init the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for amdgpu_fence_driver_init().
+ */
+void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+{
+	int i;
+
+	ring->fence_drv.cpu_addr = NULL;
+	ring->fence_drv.gpu_addr = 0;
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		ring->fence_drv.sync_seq[i] = 0;
+
+	atomic64_set(&ring->fence_drv.last_seq, 0);
+	ring->fence_drv.initialized = false;
+
+	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
+			amdgpu_fence_check_lockup);
+	ring->fence_drv.ring = ring;
+}
+
+/**
+ * amdgpu_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+int amdgpu_fence_driver_init(struct amdgpu_device *adev)
+{
+	init_waitqueue_head(&adev->fence_queue);
+	if (amdgpu_debugfs_fence_init(adev))
+		dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+	return 0;
+}
+
+/**
+ * amdgpu_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* no need to trigger GPU reset as we are unloading */
+			amdgpu_fence_driver_force_completion(adev);
+		}
+		wake_up_all(&adev->fence_queue);
+		ring->fence_drv.initialized = false;
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @adev: amdgpu device pointer
+ *
+ * In case of GPU reset failure, make sure no process keeps waiting on a
+ * fence that will never complete.
+ */
+void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
+	}
+}
+
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		amdgpu_fence_process(ring);
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%016llx\n",
+			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
+		seq_printf(m, "Last emitted        0x%016llx\n",
+			   ring->fence_drv.sync_seq[i]);
+
+		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
+			struct amdgpu_ring *other = adev->rings[j];
+			if (i != j && other && other->fence_drv.initialized)
+				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+					   j, ring->fence_drv.sync_seq[j]);
+		}
+	}
+	return 0;
+}
+
+static struct drm_info_list amdgpu_debugfs_fence_list[] = {
+	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+{
+	return "amdgpu";
+}
+
+static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	return (const char *)fence->ring->name;
+}
+
+static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
+{
+	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+}
+
+struct amdgpu_wait_cb {
+	struct fence_cb base;
+	struct task_struct *task;
+};
+
+static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct amdgpu_wait_cb *wait =
+		container_of(cb, struct amdgpu_wait_cb, base);
+	wake_up_process(wait->task);
+}
+
+static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
+					     signed long t)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_device *adev = fence->ring->adev;
+	struct amdgpu_wait_cb cb;
+
+	cb.task = current;
+
+	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
+		return t;
+
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * amdgpu_test_signaled must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (amdgpu_test_signaled(fence))
+			break;
+
+		if (adev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+	fence_remove_callback(f, &cb.base);
+
+	return t;
+}
+
+const struct fence_ops amdgpu_fence_ops = {
+	.get_driver_name = amdgpu_fence_get_driver_name,
+	.get_timeline_name = amdgpu_fence_get_timeline_name,
+	.enable_signaling = amdgpu_fence_enable_signaling,
+	.signaled = amdgpu_fence_is_signaled,
+	.wait = amdgpu_fence_default_wait,
+	.release = NULL,
+};

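amdgpu_fence_driver_force_completion() above works by writing each ring's
highest emitted sequence number (sync_seq) straight into the ring's fence
memory, so every waiter then observes its fence as already completed.  A
minimal sketch of the comparison a waiter effectively performs is below; the
helper is hypothetical, assumes the amdgpu_fence_read() accessor from earlier
in this file, and only looks at the lower 32 bits, unlike the full 64-bit
reconstruction done in amdgpu_fence_process().

/* Hypothetical sketch, not part of this patch: after force completion,
 * amdgpu_fence_read() returns the last emitted seqno, so every emitted
 * fence compares as signaled.
 */
static bool amdgpu_fence_seq_done_sketch(struct amdgpu_ring *ring, u32 seq)
{
	/* wrap-safe comparison on the lower 32 bits only */
	return (int)(amdgpu_fence_read(ring) - seq) >= 0;
}
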
+ 371 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -0,0 +1,371 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP.  AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
+
+/*
+ * Common GART table functions.
+ */
+/**
+ * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+{
+	void *ptr;
+
+	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
+				   &adev->gart.table_addr);
+	if (ptr == NULL) {
+		return -ENOMEM;
+	}
+#ifdef CONFIG_X86
+	if (0) {
+		set_memory_uc((unsigned long)ptr,
+			      adev->gart.table_size >> PAGE_SHIFT);
+	}
+#endif
+	adev->gart.ptr = ptr;
+	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
+	return 0;
+}
+
+/**
+ * amdgpu_gart_table_ram_free - free system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ */
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+{
+	if (adev->gart.ptr == NULL) {
+		return;
+	}
+#ifdef CONFIG_X86
+	if (0) {
+		set_memory_wb((unsigned long)adev->gart.ptr,
+			      adev->gart.table_size >> PAGE_SHIFT);
+	}
+#endif
+	pci_free_consistent(adev->pdev, adev->gart.table_size,
+			    (void *)adev->gart.ptr,
+			    adev->gart.table_addr);
+	adev->gart.ptr = NULL;
+	adev->gart.table_addr = 0;
+}
+
+/**
+ * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gart.robj == NULL) {
+		r = amdgpu_bo_create(adev, adev->gart.table_size,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     NULL, &adev->gart.robj);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+{
+	uint64_t gpu_addr;
+	int r;
+
+	r = amdgpu_bo_reserve(adev->gart.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin(adev->gart.robj,
+				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(adev->gart.robj);
+		return r;
+	}
+	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
+	if (r)
+		amdgpu_bo_unpin(adev->gart.robj);
+	amdgpu_bo_unreserve(adev->gart.robj);
+	adev->gart.table_addr = gpu_addr;
+	return r;
+}
+
+/**
+ * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->gart.robj == NULL) {
+		return;
+	}
+	r = amdgpu_bo_reserve(adev->gart.robj, false);
+	if (likely(r == 0)) {
+		amdgpu_bo_kunmap(adev->gart.robj);
+		amdgpu_bo_unpin(adev->gart.robj);
+		amdgpu_bo_unreserve(adev->gart.robj);
+		adev->gart.ptr = NULL;
+	}
+}
+
+/**
+ * amdgpu_gart_table_vram_free - free gart page table vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+).  These asics require the gart table to
+ * be in video memory.
+ */
+void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
+{
+	if (adev->gart.robj == NULL) {
+		return;
+	}
+	amdgpu_bo_unref(&adev->gart.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * amdgpu_gart_unbind - unbind pages from the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
+void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+	u64 page_base;
+	uint32_t flags = AMDGPU_PTE_SYSTEM;
+
+	if (!adev->gart.ready) {
+		WARN(1, "trying to unbind memory from uninitialized GART !\n");
+		return;
+	}
+
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++) {
+		if (adev->gart.pages[p]) {
+			adev->gart.pages[p] = NULL;
+			adev->gart.pages_addr[p] = adev->dummy_page.addr;
+			page_base = adev->gart.pages_addr[p];
+			if (!adev->gart.ptr)
+				continue;
+
+			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
+							t, page_base, flags);
+				page_base += AMDGPU_GPU_PAGE_SIZE;
+			}
+		}
+	}
+	mb();
+	amdgpu_gart_flush_gpu_tlb(adev, 0);
+}
+
+/**
+ * amdgpu_gart_bind - bind pages into the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint32_t flags)
+{
+	unsigned t;
+	unsigned p;
+	uint64_t page_base;
+	int i, j;
+
+	if (!adev->gart.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART !\n");
+		return -EINVAL;
+	}
+
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+
+	for (i = 0; i < pages; i++, p++) {
+		adev->gart.pages_addr[p] = dma_addr[i];
+		adev->gart.pages[p] = pagelist[i];
+		if (adev->gart.ptr) {
+			page_base = adev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
+				page_base += AMDGPU_GPU_PAGE_SIZE;
+			}
+		}
+	}
+	mb();
+	amdgpu_gart_flush_gpu_tlb(adev, 0);
+	return 0;
+}
+
+/**
+ * amdgpu_gart_init - init the driver info for managing the gart
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gart_init(struct amdgpu_device *adev)
+{
+	int r, i;
+
+	if (adev->gart.pages) {
+		return 0;
+	}
+	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
+	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	r = amdgpu_dummy_page_init(adev);
+	if (r)
+		return r;
+	/* Compute table size */
+	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
+	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
+	/* Allocate pages table */
+	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
+	if (adev->gart.pages == NULL) {
+		amdgpu_gart_fini(adev);
+		return -ENOMEM;
+	}
+	adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+					adev->gart.num_cpu_pages);
+	if (adev->gart.pages_addr == NULL) {
+		amdgpu_gart_fini(adev);
+		return -ENOMEM;
+	}
+	/* set GART entry to point to the dummy page by default */
+	for (i = 0; i < adev->gart.num_cpu_pages; i++) {
+		adev->gart.pages_addr[i] = adev->dummy_page.addr;
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_gart_fini - tear down the driver info for managing the gart
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void amdgpu_gart_fini(struct amdgpu_device *adev)
+{
+	if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
+		/* unbind pages */
+		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
+	}
+	adev->gart.ready = false;
+	vfree(adev->gart.pages);
+	vfree(adev->gart.pages_addr);
+	adev->gart.pages = NULL;
+	adev->gart.pages_addr = NULL;
+
+	amdgpu_dummy_page_fini(adev);
+}

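The index arithmetic in amdgpu_gart_bind() and amdgpu_gart_unbind() above
tracks two page sizes at once: t counts GPU pages (AMDGPU_GPU_PAGE_SIZE,
4 KiB) while p counts CPU pages (PAGE_SIZE).  A short worked sketch, using a
hypothetical 64 KiB CPU page size so the inner loop is visible; on 4 KiB-page
systems the two indices coincide and the j-loop runs exactly once:

/* Sketch of the bind-time index arithmetic (illustration, not in the patch).
 * Assume PAGE_SIZE = 64 KiB and AMDGPU_GPU_PAGE_SIZE = 4 KiB.
 */
unsigned offset = 128 * 1024;		/* byte offset into the GART aperture  */
unsigned t = offset / 4096;		/* = 32: first GPU PTE to write        */
unsigned p = t / (65536 / 4096);	/* = 2:  index of the backing CPU page */
/* each CPU page then fills 65536 / 4096 = 16 consecutive GPU PTEs, which is
 * what the inner j-loop does, advancing page_base by AMDGPU_GPU_PAGE_SIZE
 * per entry before moving on to the next dma_addr[].
 */
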
+ 72 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h

@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_GDS_H__
+#define __AMDGPU_GDS_H__
+
+/* Because TTM requires that an allocated buffer be PAGE_SIZE aligned,
+ * we report the GDS/GWS/OA sizes as PAGE_SIZE aligned.
+ */
+#define AMDGPU_GDS_SHIFT	2
+#define AMDGPU_GWS_SHIFT	PAGE_SHIFT
+#define AMDGPU_OA_SHIFT		PAGE_SHIFT
+
+#define AMDGPU_PL_GDS		TTM_PL_PRIV0
+#define AMDGPU_PL_GWS		TTM_PL_PRIV1
+#define AMDGPU_PL_OA		TTM_PL_PRIV2
+
+#define AMDGPU_PL_FLAG_GDS		TTM_PL_FLAG_PRIV0
+#define AMDGPU_PL_FLAG_GWS		TTM_PL_FLAG_PRIV1
+#define AMDGPU_PL_FLAG_OA		TTM_PL_FLAG_PRIV2
+
+struct amdgpu_ring;
+struct amdgpu_bo;
+
+struct amdgpu_gds_asic_info {
+	uint32_t 	total_size;
+	uint32_t	gfx_partition_size;
+	uint32_t	cs_partition_size;
+};
+
+struct amdgpu_gds {
+	struct amdgpu_gds_asic_info	mem;
+	struct amdgpu_gds_asic_info	gws;
+	struct amdgpu_gds_asic_info	oa;
+	/* At present, the GDS, GWS and OA resources for gfx (graphics)
+	 * are always pre-allocated and available for graphics operation.
+	 * These resources are shared between all gfx clients.
+	 * TODO: move this operation to user space
+	 */
+	struct amdgpu_bo*		gds_gfx_bo;
+	struct amdgpu_bo*		gws_gfx_bo;
+	struct amdgpu_bo*		oa_gfx_bo;
+};
+
+struct amdgpu_gds_reg_offset {
+	uint32_t	mem_base;
+	uint32_t	mem_size;
+	uint32_t	gws;
+	uint32_t	oa;
+};
+
+#endif /* __AMDGPU_GDS_H__ */

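The *_SHIFT values above define the granularity in which userspace expresses
GDS, GWS and OA allocations; amdgpu_gem_create_ioctl() (in amdgpu_gem.c
below) left-shifts the requested bo_size by the matching shift before
rounding up to PAGE_SIZE for TTM.  A rough illustration of that conversion,
assuming 4 KiB pages (the snippet is illustrative, not part of the patch):

/* GDS/GWS/OA size conversion as done in amdgpu_gem_create_ioctl():
 * user units -> bytes -> page-rounded bytes.
 */
uint64_t size = 4;			/* e.g. 4 GWS entries requested   */
size <<= AMDGPU_GWS_SHIFT;		/* PAGE_SHIFT: 4 * 4096 = 16 KiB  */
size = roundup(size, PAGE_SIZE);	/* already a page multiple here   */
/* for GDS the shift is only 2, so a request of 256 units becomes 1024 bytes,
 * which roundup() then pads to a full 4 KiB page.
 */
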
+ 737 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -0,0 +1,737 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/ktime.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
+
+	if (robj) {
+		if (robj->gem_base.import_attach)
+			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+		amdgpu_bo_unref(&robj);
+	}
+}
+
+int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+				int alignment, u32 initial_domain,
+				u64 flags, bool kernel,
+				struct drm_gem_object **obj)
+{
+	struct amdgpu_bo *robj;
+	unsigned long max_size;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE) {
+		alignment = PAGE_SIZE;
+	}
+
+	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
+		/* Maximum bo size is the unpinned gtt size since we use the gtt to
+		 * handle vram to system pool migrations.
+		 */
+		max_size = adev->mc.gtt_size - adev->gart_pin_size;
+		if (size > max_size) {
+			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+				  size >> 20, max_size >> 20);
+			return -ENOMEM;
+		}
+	}
+retry:
+	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		}
+		return r;
+	}
+	*obj = &robj->gem_base;
+	robj->pid = task_pid_nr(current);
+
+	mutex_lock(&adev->gem.mutex);
+	list_add_tail(&robj->list, &adev->gem.objects);
+	mutex_unlock(&adev->gem.mutex);
+
+	return 0;
+}
+
+int amdgpu_gem_init(struct amdgpu_device *adev)
+{
+	INIT_LIST_HEAD(&adev->gem.objects);
+	return 0;
+}
+
+void amdgpu_gem_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_force_delete(adev);
+}
+
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and
+ * open ioctl cases.
+ */
+int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
+	struct amdgpu_bo_va *bo_va;
+	int r;
+
+	r = amdgpu_bo_reserve(rbo, false);
+	if (r) {
+		return r;
+	}
+
+	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	if (!bo_va) {
+		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+	} else {
+		++bo_va->ref_count;
+	}
+	amdgpu_bo_unreserve(rbo);
+
+	return 0;
+}
+
+void amdgpu_gem_object_close(struct drm_gem_object *obj,
+			     struct drm_file *file_priv)
+{
+	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = rbo->adev;
+	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
+	struct amdgpu_bo_va *bo_va;
+	int r;
+
+	r = amdgpu_bo_reserve(rbo, true);
+	if (r) {
+		dev_err(adev->dev, "leaking bo va because "
+			"we fail to reserve bo (%d)\n", r);
+		return;
+	}
+	bo_va = amdgpu_vm_bo_find(vm, rbo);
+	if (bo_va) {
+		if (--bo_va->ref_count == 0) {
+			amdgpu_vm_bo_rmv(adev, bo_va);
+		}
+	}
+	amdgpu_bo_unreserve(rbo);
+}
+
+static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
+{
+	if (r == -EDEADLK) {
+		r = amdgpu_gpu_reset(adev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
+/*
+ * GEM ioctls.
+ */
+int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_gem_create *args = data;
+	uint64_t size = args->in.bo_size;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	bool kernel = false;
+	int r;
+
+	down_read(&adev->exclusive_lock);
+	/* create a gem object to contain this object in */
+	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
+	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+		kernel = true;
+		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
+			size = size << AMDGPU_GDS_SHIFT;
+		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
+			size = size << AMDGPU_GWS_SHIFT;
+		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
+			size = size << AMDGPU_OA_SHIFT;
+		else {
+			r = -EINVAL;
+			goto error_unlock;
+		}
+	}
+	size = roundup(size, PAGE_SIZE);
+
+	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
+				     (u32)(0xffffffff & args->in.domains),
+				     args->in.domain_flags,
+				     kernel, &gobj);
+	if (r)
+		goto error_unlock;
+
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r)
+		goto error_unlock;
+
+	memset(args, 0, sizeof(*args));
+	args->out.handle = handle;
+	up_read(&adev->exclusive_lock);
+	return 0;
+
+error_unlock:
+	up_read(&adev->exclusive_lock);
+	r = amdgpu_gem_handle_lockup(adev, r);
+	return r;
+}
+
+int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_amdgpu_gem_userptr *args = data;
+	struct drm_gem_object *gobj;
+	struct amdgpu_bo *bo;
+	uint32_t handle;
+	int r;
+
+	if (offset_in_page(args->addr | args->size))
+		return -EINVAL;
+
+	/* reject unknown flag values */
+	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
+	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
+	    AMDGPU_GEM_USERPTR_REGISTER))
+		return -EINVAL;
+
+	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+		   !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+
+		/* if we want to write to it we must require anonymous
+		   memory and install an MMU notifier */
+		return -EACCES;
+	}
+
+	down_read(&adev->exclusive_lock);
+
+	/* create a gem object to contain this object in */
+	r = amdgpu_gem_object_create(adev, args->size, 0,
+				     AMDGPU_GEM_DOMAIN_CPU, 0,
+				     0, &gobj);
+	if (r)
+		goto handle_lockup;
+
+	bo = gem_to_amdgpu_bo(gobj);
+	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+	if (r)
+		goto release_object;
+
+	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
+		r = amdgpu_mn_register(bo, args->addr);
+		if (r)
+			goto release_object;
+	}
+
+	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
+		down_read(&current->mm->mmap_sem);
+		r = amdgpu_bo_reserve(bo, true);
+		if (r) {
+			up_read(&current->mm->mmap_sem);
+			goto release_object;
+		}
+
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		amdgpu_bo_unreserve(bo);
+		up_read(&current->mm->mmap_sem);
+		if (r)
+			goto release_object;
+	}
+
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r)
+		goto handle_lockup;
+
+	args->handle = handle;
+	up_read(&adev->exclusive_lock);
+	return 0;
+
+release_object:
+	drm_gem_object_unreference_unlocked(gobj);
+
+handle_lockup:
+	up_read(&adev->exclusive_lock);
+	r = amdgpu_gem_handle_lockup(adev, r);
+
+	return r;
+}
+
+int amdgpu_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct amdgpu_bo *robj;
+
+	gobj = drm_gem_object_lookup(dev, filp, handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_amdgpu_bo(gobj);
+	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
+	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EPERM;
+	}
+	*offset_p = amdgpu_bo_mmap_offset(robj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	union drm_amdgpu_gem_mmap *args = data;
+	uint32_t handle = args->in.handle;
+	memset(args, 0, sizeof(*args));
+	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
+}
+
+/**
+ * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
+ *
+ * @timeout_ns: timeout in ns
+ *
+ * Calculate the timeout in jiffies from an absolute timeout in ns.
+ */
+unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
+{
+	unsigned long timeout_jiffies;
+	ktime_t timeout;
+
+	/* clamp timeout if it's too large */
+	if (((int64_t)timeout_ns) < 0)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
+	if (ktime_to_ns(timeout) < 0)
+		return 0;
+
+	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
+	/* clamp timeout to avoid unsigned -> signed overflow */
+	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
+		return MAX_SCHEDULE_TIMEOUT - 1;
+
+	return timeout_jiffies;
+}
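
/* Worked example (illustration only, not part of the patch): timeout_ns is
 * an absolute deadline on the ktime_get() clock.  If the current time is
 * 100 s and the deadline is 100.25 s, the remaining budget is 250,000,000 ns,
 * which nsecs_to_jiffies() maps to 250 jiffies at HZ=1000 (about 62 at
 * HZ=250).  A deadline already in the past gives a negative difference and
 * the function returns 0, so the wait ioctl below degenerates into a single
 * non-blocking check.
 */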
+
+int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_gem_wait_idle *args = data;
+	struct drm_gem_object *gobj;
+	struct amdgpu_bo *robj;
+	uint32_t handle = args->in.handle;
+	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
+	int r = 0;
+	long ret;
+
+	gobj = drm_gem_object_lookup(dev, filp, handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_amdgpu_bo(gobj);
+	if (timeout == 0)
+		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+	else
+		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
+
+	/* ret == 0 means not signaled,
+	 * ret > 0 means signaled
+	 * ret < 0 means interrupted before timeout
+	 */
+	if (ret >= 0) {
+		memset(args, 0, sizeof(*args));
+		args->out.status = (ret == 0);
+	} else
+		r = ret;
+
+	drm_gem_object_unreference_unlocked(gobj);
+	r = amdgpu_gem_handle_lockup(adev, r);
+	return r;
+}
+
+int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_amdgpu_gem_metadata *args = data;
+	struct drm_gem_object *gobj;
+	struct amdgpu_bo *robj;
+	int r = -1;
+
+	DRM_DEBUG("%d \n", args->handle);
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	robj = gem_to_amdgpu_bo(gobj);
+
+	r = amdgpu_bo_reserve(robj, false);
+	if (unlikely(r != 0))
+		goto out;
+
+	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
+		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
+		r = amdgpu_bo_get_metadata(robj, args->data.data,
+					   sizeof(args->data.data),
+					   &args->data.data_size_bytes,
+					   &args->data.flags);
+	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
+		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
+		if (!r)
+			r = amdgpu_bo_set_metadata(robj, args->data.data,
+						   args->data.data_size_bytes,
+						   args->data.flags);
+	}
+
+	amdgpu_bo_unreserve(robj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+/**
+ * amdgpu_gem_va_update_vm - update the bo_va in its VM
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to update
+ *
+ * Update the bo_va directly after setting its address. Errors are not
+ * vital here, so they are not reported back to userspace.
+ */
+static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+				    struct amdgpu_bo_va *bo_va)
+{
+	struct ttm_validate_buffer tv, *entry;
+	struct amdgpu_bo_list_entry *vm_bos;
+	struct ww_acquire_ctx ticket;
+	struct list_head list;
+	unsigned domain;
+	int r;
+
+	INIT_LIST_HEAD(&list);
+
+	tv.bo = &bo_va->bo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
+	if (!vm_bos)
+		return;
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	if (r)
+		goto error_free;
+
+	list_for_each_entry(entry, &list, head) {
+		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
+		/* if anything is swapped out don't swap it in here,
+		   just abort and wait for the next CS */
+		if (domain == AMDGPU_GEM_DOMAIN_CPU)
+			goto error_unreserve;
+	}
+
+	mutex_lock(&bo_va->vm->mutex);
+	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
+	if (r)
+		goto error_unlock;
+
+	r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+error_unlock:
+	mutex_unlock(&bo_va->vm->mutex);
+
+error_unreserve:
+	ttm_eu_backoff_reservation(&ticket, &list);
+
+error_free:
+	drm_free_large(vm_bos);
+
+	if (r)
+		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
+
+
+int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	union drm_amdgpu_gem_va *args = data;
+	struct drm_gem_object *gobj;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct amdgpu_bo *rbo;
+	struct amdgpu_bo_va *bo_va;
+	uint32_t invalid_flags, va_flags = 0;
+	int r = 0;
+
+	if (!adev->vm_manager.enabled) {
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+		return -ENOTTY;
+	}
+
+	if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
+		dev_err(&dev->pdev->dev,
+			"va_address 0x%lX is in reserved area 0x%X\n",
+			(unsigned long)args->in.va_address,
+			AMDGPU_VA_RESERVED_SIZE);
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+			AMDGPU_VM_PAGE_EXECUTABLE);
+	if ((args->in.flags & invalid_flags)) {
+		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+			args->in.flags, invalid_flags);
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	switch (args->in.operation) {
+	case AMDGPU_VA_OP_MAP:
+	case AMDGPU_VA_OP_UNMAP:
+		break;
+	default:
+		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+			args->in.operation);
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
+	if (gobj == NULL) {
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+		return -ENOENT;
+	}
+	rbo = gem_to_amdgpu_bo(gobj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			memset(args, 0, sizeof(*args));
+			args->out.result = AMDGPU_VA_RESULT_ERROR;
+		}
+		drm_gem_object_unreference_unlocked(gobj);
+		return r;
+	}
+	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+	if (!bo_va) {
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return -ENOENT;
+	}
+
+	switch (args->in.operation) {
+	case AMDGPU_VA_OP_MAP:
+		if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
+			va_flags |= AMDGPU_PTE_READABLE;
+		if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
+			va_flags |= AMDGPU_PTE_WRITEABLE;
+		if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
+			va_flags |= AMDGPU_PTE_EXECUTABLE;
+		r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address,
+				     args->in.offset_in_bo, args->in.map_size,
+				     va_flags);
+		break;
+	case AMDGPU_VA_OP_UNMAP:
+		r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
+		break;
+	default:
+		break;
+	}
+
+	if (!r) {
+		amdgpu_gem_va_update_vm(adev, bo_va);
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_OK;
+	} else {
+		memset(args, 0, sizeof(*args));
+		args->out.result = AMDGPU_VA_RESULT_ERROR;
+	}
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp)
+{
+	struct drm_amdgpu_gem_op *args = data;
+	struct drm_gem_object *gobj;
+	struct amdgpu_bo *robj;
+	int r;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_amdgpu_bo(gobj);
+
+	r = amdgpu_bo_reserve(robj, false);
+	if (unlikely(r))
+		goto out;
+
+	switch (args->op) {
+	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
+		struct drm_amdgpu_gem_create_in info;
+		void __user *out = (void __user *)(long)args->value;
+
+		info.bo_size = robj->gem_base.size;
+		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
+		info.domains = robj->initial_domain;
+		info.domain_flags = robj->flags;
+		if (copy_to_user(out, &info, sizeof(info)))
+			r = -EFAULT;
+		break;
+	}
+	case AMDGPU_GEM_OP_SET_PLACEMENT:
+		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
+			r = -EPERM;
+			break;
+		}
+		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+						      AMDGPU_GEM_DOMAIN_GTT |
+						      AMDGPU_GEM_DOMAIN_CPU);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(robj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+	args->size = args->pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	r = amdgpu_gem_object_create(adev, args->size, 0,
+				     AMDGPU_GEM_DOMAIN_VRAM,
+				     0, ttm_bo_type_device,
+				     &gobj);
+	if (r)
+		return -ENOMEM;
+
+	r = drm_gem_handle_create(file_priv, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r) {
+		return r;
+	}
+	args->handle = handle;
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_bo *rbo;
+	unsigned i = 0;
+
+	mutex_lock(&adev->gem.mutex);
+	list_for_each_entry(rbo, &adev->gem.objects, list) {
+		unsigned domain;
+		const char *placement;
+
+		domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
+		switch (domain) {
+		case AMDGPU_GEM_DOMAIN_VRAM:
+			placement = "VRAM";
+			break;
+		case AMDGPU_GEM_DOMAIN_GTT:
+			placement = " GTT";
+			break;
+		case AMDGPU_GEM_DOMAIN_CPU:
+		default:
+			placement = " CPU";
+			break;
+		}
+		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+			   i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
+			   placement, (unsigned long)rbo->pid);
+		i++;
+	}
+	mutex_unlock(&adev->gem.mutex);
+	return 0;
+}
+
+static struct drm_info_list amdgpu_debugfs_gem_list[] = {
+	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
+#endif
+	return 0;
+}

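Taken together, amdgpu_gem_create_ioctl() and amdgpu_mode_dumb_mmap() above
are what a userspace client hits when it allocates and maps a buffer.  A
rough userspace-side sketch of the allocation path follows, assuming the
UAPI definitions from this series' amdgpu_drm.h (the include path and the
surrounding setup are simplified; error handling is minimal):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* union drm_amdgpu_gem_create, domain flags */

/* Sketch only: allocate a 1 MiB VRAM buffer through the ioctl added above.
 * 'fd' is an open render node, e.g. /dev/dri/renderD128.
 */
static int gem_create_vram_sketch(int fd, uint32_t *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = 1 << 20;		/* rounded up to PAGE_SIZE in the kernel */
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM; /* kernel retries with GTT added on failure */
	args.in.domain_flags = 0;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;		/* GEM handle, usable with the other ioctls */
	return 0;
}
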
+ 72 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

@@ -0,0 +1,72 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+/*
+ * GPU scratch registers helpers function.
+ */
+/**
+ * amdgpu_gfx_scratch_get - Allocate a scratch register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
+{
+	int i;
+
+	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+		if (adev->gfx.scratch.free[i]) {
+			adev->gfx.scratch.free[i] = false;
+			*reg = adev->gfx.scratch.reg[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * amdgpu_gfx_scratch_free - Free a scratch register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
+void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
+{
+	int i;
+
+	for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+		if (adev->gfx.scratch.reg[i] == reg) {
+			adev->gfx.scratch.free[i] = true;
+			return;
+		}
+	}
+}

+ 30 - 0
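The scratch register allocator above is typically paired with a write/poll
sequence to check that a ring can actually execute packets.  A minimal
sketch of that usage; WREG32/RREG32 are the driver's MMIO accessors, the
packet emission step is elided because it is ASIC-specific, and the poll
loop itself is illustrative rather than copied from this patch:

/* Illustrative use of the scratch register helpers (not part of the patch). */
uint32_t scratch, tmp;
unsigned i;
int r;

r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r)
	return r;
WREG32(scratch, 0xCAFEDEAD);		/* seed with a known junk value */
/* ... emit a ring or IB packet that writes 0xDEADBEEF to 'scratch' ... */
for (i = 0; i < adev->usec_timeout; i++) {
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF)		/* the packet was executed */
		break;
	DRM_UDELAY(1);
}
amdgpu_gfx_scratch_free(adev, scratch);
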
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_GFX_H__
+#define __AMDGPU_GFX_H__
+
+int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
+void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+
+#endif

+ 395 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c

@@ -0,0 +1,395 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "amdgpu_atombios.h"
+#include "atom.h"
+#include "atombios_dp.h"
+#include "atombios_i2c.h"
+
+/* bit banging i2c */
+static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct amdgpu_device *adev = i2c->dev->dev_private;
+	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	mutex_lock(&i2c->mutex);
+
+	/* switch the pads to ddc mode */
+	if (rec->hw_capable) {
+		temp = RREG32(rec->mask_clk_reg);
+		temp &= ~(1 << 16);
+		WREG32(rec->mask_clk_reg, temp);
+	}
+
+	/* clear the output pin values */
+	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, temp);
+
+	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, temp);
+
+	/* set the pins to input */
+	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, temp);
+
+	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, temp);
+
+	/* mask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+
+	return 0;
+}
+
+static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct amdgpu_device *adev = i2c->dev->dev_private;
+	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	/* unmask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+
+	mutex_unlock(&i2c->mutex);
+}
+
+static int amdgpu_i2c_get_clock(void *i2c_priv)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_priv;
+	struct amdgpu_device *adev = i2c->dev->dev_private;
+	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_clk_reg);
+	val &= rec->y_clk_mask;
+
+	return (val != 0);
+}
+
+
+static int amdgpu_i2c_get_data(void *i2c_priv)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_priv;
+	struct amdgpu_device *adev = i2c->dev->dev_private;
+	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_data_reg);
+	val &= rec->y_data_mask;
+
+	return (val != 0);
+}
+
+static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_priv;
+	struct amdgpu_device *adev = i2c->dev->dev_private;
+	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
+}
+
+static void amdgpu_i2c_set_data(void *i2c_priv, int data)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_priv;
+	struct amdgpu_device *adev = i2c->dev->dev_private;
+	struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	val |= data ? 0 : rec->en_data_mask;
+	WREG32(rec->en_data_reg, val);
+}
+
+static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
+	.master_xfer = amdgpu_atombios_i2c_xfer,
+	.functionality = amdgpu_atombios_i2c_func,
+};
+
+struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
+					    struct amdgpu_i2c_bus_rec *rec,
+					    const char *name)
+{
+	struct amdgpu_i2c_chan *i2c;
+	int ret;
+
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (amdgpu_hw_i2c == 0))
+		return NULL;
+
+	i2c = kzalloc(sizeof(struct amdgpu_i2c_chan), GFP_KERNEL);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	mutex_init(&i2c->mutex);
+	if (rec->hw_capable &&
+	    amdgpu_hw_i2c) {
+		/* hw i2c using atom */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "AMDGPU i2c hw bus %s", name);
+		i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
+		ret = i2c_add_adapter(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register hw i2c %s\n", name);
+			goto out_free;
+		}
+	} else {
+		/* set the amdgpu bit adapter */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "AMDGPU i2c bit bus %s", name);
+		i2c->adapter.algo_data = &i2c->bit;
+		i2c->bit.pre_xfer = amdgpu_i2c_pre_xfer;
+		i2c->bit.post_xfer = amdgpu_i2c_post_xfer;
+		i2c->bit.setsda = amdgpu_i2c_set_data;
+		i2c->bit.setscl = amdgpu_i2c_set_clock;
+		i2c->bit.getsda = amdgpu_i2c_get_data;
+		i2c->bit.getscl = amdgpu_i2c_get_clock;
+		i2c->bit.udelay = 10;
+		i2c->bit.timeout = usecs_to_jiffies(2200);	/* from VESA */
+		i2c->bit.data = i2c;
+		ret = i2c_bit_add_bus(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register bit i2c %s\n", name);
+			goto out_free;
+		}
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+
+}
+
+void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
+
+/* Add the default buses */
+void amdgpu_i2c_init(struct amdgpu_device *adev)
+{
+	if (amdgpu_hw_i2c)
+		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
+	if (adev->is_atom_bios)
+		amdgpu_atombios_i2c_init(adev);
+}
+
+/* remove all the buses */
+void amdgpu_i2c_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
+		if (adev->i2c_bus[i]) {
+			amdgpu_i2c_destroy(adev->i2c_bus[i]);
+			adev->i2c_bus[i] = NULL;
+		}
+	}
+}
+
+/* Add additional buses */
+void amdgpu_i2c_add(struct amdgpu_device *adev,
+		     struct amdgpu_i2c_bus_rec *rec,
+		     const char *name)
+{
+	struct drm_device *dev = adev->ddev;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
+		if (!adev->i2c_bus[i]) {
+			adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
+			return;
+		}
+	}
+}
+
+/* looks up bus based on id */
+struct amdgpu_i2c_chan *
+amdgpu_i2c_lookup(struct amdgpu_device *adev,
+		   struct amdgpu_i2c_bus_rec *i2c_bus)
+{
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
+		if (adev->i2c_bus[i] &&
+		    (adev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+			return adev->i2c_bus[i];
+		}
+	}
+	return NULL;
+}
+
+static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
+				 u8 slave_addr,
+				 u8 addr,
+				 u8 *val)
+{
+	u8 out_buf[2];
+	u8 in_buf[2];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+		*val = in_buf[0];
+		DRM_DEBUG("val = 0x%02x\n", *val);
+	} else {
+		DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
+			  addr, *val);
+	}
+}
+
+static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
+				 u8 slave_addr,
+				 u8 addr,
+				 u8 val)
+{
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = val;
+
+	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+		DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
+			  addr, val);
+}
+
+/* ddc router switching */
+void
+amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
+{
+	u8 val;
+
+	if (!amdgpu_connector->router.ddc_valid)
+		return;
+
+	if (!amdgpu_connector->router_bus)
+		return;
+
+	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~amdgpu_connector->router.ddc_mux_control_pin;
+	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x3, val);
+	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~amdgpu_connector->router.ddc_mux_control_pin;
+	val |= amdgpu_connector->router.ddc_mux_state;
+	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void
+amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
+{
+	u8 val;
+
+	if (!amdgpu_connector->router.cd_valid)
+		return;
+
+	if (!amdgpu_connector->router_bus)
+		return;
+
+	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~amdgpu_connector->router.cd_mux_control_pin;
+	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x3, val);
+	amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~amdgpu_connector->router.cd_mux_control_pin;
+	val |= amdgpu_connector->router.cd_mux_state;
+	amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
+			    amdgpu_connector->router.i2c_addr,
+			    0x1, val);
+}

+ 44 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h

@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_I2C_H__
+#define __AMDGPU_I2C_H__
+
+struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
+					    struct amdgpu_i2c_bus_rec *rec,
+					    const char *name);
+void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
+void amdgpu_i2c_init(struct amdgpu_device *adev);
+void amdgpu_i2c_fini(struct amdgpu_device *adev);
+void amdgpu_i2c_add(struct amdgpu_device *adev,
+		     struct amdgpu_i2c_bus_rec *rec,
+		     const char *name);
+struct amdgpu_i2c_chan *
+amdgpu_i2c_lookup(struct amdgpu_device *adev,
+		   struct amdgpu_i2c_bus_rec *i2c_bus);
+void
+amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
+void
+amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
+
+#endif

+ 353 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -0,0 +1,353 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "atom.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored.  You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them.  Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_ib_get - request an IB (Indirect Buffer)
+ *
+ * @ring: ring the IB is associated with
+ * @vm: VM this IB is associated with, if any (may be NULL)
+ * @size: requested IB size
+ * @ib: IB object returned
+ *
+ * Request an IB (all asics).  IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
+		  unsigned size, struct amdgpu_ib *ib)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int r;
+
+	if (size) {
+		r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+				      &ib->sa_bo, size, 256);
+		if (r) {
+			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
+			return r;
+		}
+
+		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
+
+		if (!vm)
+			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+		else
+			ib->gpu_addr = 0;
+
+	} else {
+		ib->sa_bo = NULL;
+		ib->ptr = NULL;
+		ib->gpu_addr = 0;
+	}
+
+	amdgpu_sync_create(&ib->sync);
+
+	ib->ring = ring;
+	ib->fence = NULL;
+	ib->user = NULL;
+	ib->vm = vm;
+	ib->gds_base = 0;
+	ib->gds_size = 0;
+	ib->gws_base = 0;
+	ib->gws_size = 0;
+	ib->oa_base = 0;
+	ib->oa_size = 0;
+	ib->flags = 0;
+
+	return 0;
+}
+
+/**
+ * amdgpu_ib_free - free an IB (Indirect Buffer)
+ *
+ * @adev: amdgpu_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
+{
+	amdgpu_sync_free(adev, &ib->sync, ib->fence);
+	amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+	amdgpu_fence_unref(&ib->fence);
+}
+
+/**
+ * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @adev: amdgpu_device pointer
+ * @num_ibs: number of IBs to schedule
+ * @ibs: IB objects to schedule
+ * @owner: owner for creating the fences
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine).  Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed.  To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE.  If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
+ * to SI there was just a DE IB.
+ */
+int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
+		       struct amdgpu_ib *ibs, void *owner)
+{
+	struct amdgpu_ib *ib = &ibs[0];
+	struct amdgpu_ring *ring;
+	struct amdgpu_ctx *ctx, *old_ctx;
+	struct amdgpu_vm *vm;
+	unsigned i;
+	int r = 0;
+
+	if (num_ibs == 0)
+		return -EINVAL;
+
+	ring = ibs->ring;
+	ctx = ibs->ctx;
+	vm = ibs->vm;
+
+	if (!ring->ready) {
+		dev_err(adev->dev, "couldn't schedule ib\n");
+		return -EINVAL;
+	}
+
+	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
+	if (r) {
+		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
+		return r;
+	}
+
+	if (vm) {
+		/* grab a vm id if necessary */
+		struct amdgpu_fence *vm_id_fence = NULL;
+		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
+		amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+	}
+
+	r = amdgpu_sync_rings(&ibs->sync, ring);
+	if (r) {
+		amdgpu_ring_unlock_undo(ring);
+		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
+		return r;
+	}
+
+	if (vm) {
+		/* do context switch */
+		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
+	}
+
+	if (vm && ring->funcs->emit_gds_switch)
+		amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+					    ib->gds_base, ib->gds_size,
+					    ib->gws_base, ib->gws_size,
+					    ib->oa_base, ib->oa_size);
+
+	if (ring->funcs->emit_hdp_flush)
+		amdgpu_ring_emit_hdp_flush(ring);
+
+	old_ctx = ring->current_ctx;
+	for (i = 0; i < num_ibs; ++i) {
+		ib = &ibs[i];
+
+		if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
+			ring->current_ctx = old_ctx;
+			amdgpu_ring_unlock_undo(ring);
+			return -EINVAL;
+		}
+		amdgpu_ring_emit_ib(ring, ib);
+		ring->current_ctx = ctx;
+	}
+
+	r = amdgpu_fence_emit(ring, owner, &ib->fence);
+	if (r) {
+		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+		ring->current_ctx = old_ctx;
+		amdgpu_ring_unlock_undo(ring);
+		return r;
+	}
+
+	/* wrap the last IB with fence */
+	if (ib->user) {
+		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
+		addr += ib->user->offset;
+		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq, true);
+	}
+
+	if (ib->vm)
+		amdgpu_vm_fence(adev, ib->vm, ib->fence);
+
+	amdgpu_ring_unlock_commit(ring);
+	return 0;
+}
+
+/**
+ * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ib_pool_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->ib_pool_ready) {
+		return 0;
+	}
+	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
+				      AMDGPU_IB_POOL_SIZE*64*1024,
+				      AMDGPU_GPU_PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT);
+	if (r) {
+		return r;
+	}
+
+	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
+	if (r) {
+		return r;
+	}
+
+	adev->ib_pool_ready = true;
+	if (amdgpu_debugfs_sa_init(adev)) {
+		dev_err(adev->dev, "failed to register debugfs file for SA\n");
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
+{
+	if (adev->ib_pool_ready) {
+		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
+		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
+		adev->ib_pool_ready = false;
+	}
+}
+
+/**
+ * amdgpu_ib_ring_tests - test IBs on the rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+{
+	unsigned i;
+	int r;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring || !ring->ready)
+			continue;
+
+		r = amdgpu_ring_test_ib(ring);
+		if (r) {
+			ring->ready = false;
+			adev->needs_reset = false;
+
+			if (ring == &adev->gfx.gfx_ring[0]) {
+				/* oh, oh, that's really bad */
+				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
+				adev->accel_working = false;
+				return r;
+
+			} else {
+				/* still not good, but we can live with it */
+				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+
+	return 0;
+
+}
+
+static struct drm_info_list amdgpu_debugfs_sa_list[] = {
+	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
+#else
+	return 0;
+#endif
+}

+ 216 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c

@@ -0,0 +1,216 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+
+/**
+ * amdgpu_ih_ring_alloc - allocate memory for the IH ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller.
+ * Returns 0 for success, errors for failure.
+ */
+static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* Allocate ring buffer */
+	if (adev->irq.ih.ring_obj == NULL) {
+		r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
+				     PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_GTT, 0,
+				     NULL, &adev->irq.ih.ring_obj);
+		if (r) {
+			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
+				  AMDGPU_GEM_DOMAIN_GTT,
+				  &adev->irq.ih.gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
+			DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
+				   (void **)&adev->irq.ih.ring);
+		amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
+		if (r) {
+			DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_ih_ring_init - initialize the IH state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the IH state and allocates a buffer
+ * for the IH ring buffer.
+ * Returns 0 for success, errors for failure.
+ */
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+			bool use_bus_addr)
+{
+	u32 rb_bufsz;
+	int r;
+
+	/* Align ring size */
+	rb_bufsz = order_base_2(ring_size / 4);
+	ring_size = (1 << rb_bufsz) * 4;
+	adev->irq.ih.ring_size = ring_size;
+	adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
+	adev->irq.ih.rptr = 0;
+	adev->irq.ih.use_bus_addr = use_bus_addr;
+
+	if (adev->irq.ih.use_bus_addr) {
+		if (!adev->irq.ih.ring) {
+			/* add 8 bytes for the rptr/wptr shadows and
+			 * add them to the end of the ring allocation.
+			 */
+			adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
+			if (adev->irq.ih.ring == NULL)
+				return -ENOMEM;
+			adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
+								  (void *)adev->irq.ih.ring,
+								  adev->irq.ih.ring_size,
+								  PCI_DMA_BIDIRECTIONAL);
+			if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
+				dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
+				kfree((void *)adev->irq.ih.ring);
+				return -ENOMEM;
+			}
+			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
+			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
+		}
+		return 0;
+	} else {
+		r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
+		if (r) {
+			dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+			return r;
+		}
+
+		r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
+		if (r) {
+			amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
+			dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+			return r;
+		}
+
+		return amdgpu_ih_ring_alloc(adev);
+	}
+}
+
+/**
+ * amdgpu_ih_ring_fini - tear down the IH state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tears down the IH state and frees buffer
+ * used for the IH ring buffer.
+ */
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->irq.ih.use_bus_addr) {
+		if (adev->irq.ih.ring) {
+			/* add 8 bytes for the rptr/wptr shadows and
+			 * add them to the end of the ring allocation.
+			 */
+			pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
+					 adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
+			kfree((void *)adev->irq.ih.ring);
+			adev->irq.ih.ring = NULL;
+		}
+	} else {
+		if (adev->irq.ih.ring_obj) {
+			r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
+			if (likely(r == 0)) {
+				amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
+				amdgpu_bo_unpin(adev->irq.ih.ring_obj);
+				amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
+			}
+			amdgpu_bo_unref(&adev->irq.ih.ring_obj);
+			adev->irq.ih.ring = NULL;
+			adev->irq.ih.ring_obj = NULL;
+		}
+		amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
+		amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
+	}
+}
+
+/**
+ * amdgpu_ih_process - interrupt handler
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Interrupt handler (VI), walk the IH ring.
+ * Returns irq process return code.
+ */
+int amdgpu_ih_process(struct amdgpu_device *adev)
+{
+	struct amdgpu_iv_entry entry;
+	u32 wptr;
+
+	if (!adev->irq.ih.enabled || adev->shutdown)
+		return IRQ_NONE;
+
+	wptr = amdgpu_ih_get_wptr(adev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&adev->irq.ih.lock, 1))
+		return IRQ_NONE;
+
+	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	while (adev->irq.ih.rptr != wptr) {
+		amdgpu_ih_decode_iv(adev, &entry);
+		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+
+		amdgpu_irq_dispatch(adev, &entry);
+	}
+	amdgpu_ih_set_rptr(adev);
+	atomic_set(&adev->irq.ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = amdgpu_ih_get_wptr(adev);
+	if (wptr != adev->irq.ih.rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
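amdgpu_ih_ring_init() rounds the ring size up to a power of two precisely so that the read pointer can wrap with a single AND against ptr_mask, which is what the loop in amdgpu_ih_process() relies on. A self-contained sketch of that pattern follows; the handle_dw() helper is a hypothetical stand-in for amdgpu_ih_decode_iv() and amdgpu_irq_dispatch().

/* Hedged sketch of the power-of-two ring walk; not driver code. */
#include <stdint.h>

#define IH_RING_DW	1024u			/* must be a power of two */
#define IH_PTR_MASK	(IH_RING_DW - 1)

static void handle_dw(uint32_t dw)
{
	(void)dw;				/* decode/dispatch would go here */
}

static uint32_t ih_walk(const uint32_t *ring, uint32_t rptr, uint32_t wptr)
{
	/* consume everything written so far; rptr wraps via the mask */
	while (rptr != wptr) {
		handle_dw(ring[rptr]);
		rptr = (rptr + 1) & IH_PTR_MASK;
	}
	return rptr;
}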

+ 62 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IH_H__
+#define __AMDGPU_IH_H__
+
+struct amdgpu_device;
+
+/*
+ * R6xx+ IH ring
+ */
+struct amdgpu_ih_ring {
+	struct amdgpu_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		ptr_mask;
+	atomic_t		lock;
+	bool                    enabled;
+	unsigned		wptr_offs;
+	unsigned		rptr_offs;
+	u32			doorbell_index;
+	bool			use_doorbell;
+	bool			use_bus_addr;
+	dma_addr_t		rb_dma_addr; /* only used when use_bus_addr = true */
+};
+
+struct amdgpu_iv_entry {
+	unsigned src_id;
+	unsigned src_data;
+	unsigned ring_id;
+	unsigned vm_id;
+	unsigned pas_id;
+};
+
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
+			bool use_bus_addr);
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
+int amdgpu_ih_process(struct amdgpu_device *adev);
+
+#endif

+ 47 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c

@@ -0,0 +1,47 @@
+/**
+ * \file amdgpu_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the AMDGPU DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu_drv.h"
+
+long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	ret = amdgpu_drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}

+ 458 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c

@@ -0,0 +1,458 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "atom.h"
+#include "amdgpu_connectors.h"
+
+#include <linux/pm_runtime.h>
+
+#define AMDGPU_WAIT_IDLE_TIMEOUT 200
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+/**
+ * amdgpu_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt.  It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
+static void amdgpu_hotplug_work_func(struct work_struct *work)
+{
+	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+						  hotplug_work);
+	struct drm_device *dev = adev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	mutex_lock(&mode_config->mutex);
+	if (mode_config->num_connector) {
+		list_for_each_entry(connector, &mode_config->connector_list, head)
+			amdgpu_connector_hotplug(connector);
+	}
+	mutex_unlock(&mode_config->mutex);
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * amdgpu_irq_reset_work_func - execute gpu reset
+ *
+ * @work: work struct
+ *
+ * Execute a scheduled gpu reset (all asics).
+ * This function is called when the irq handler
+ * thinks we need a gpu reset.
+ */
+static void amdgpu_irq_reset_work_func(struct work_struct *work)
+{
+	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+						  reset_work);
+
+	amdgpu_gpu_reset(adev);
+}
+
+/* Disable *all* interrupts */
+static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+{
+	unsigned long irqflags;
+	unsigned i, j;
+	int r;
+
+	spin_lock_irqsave(&adev->irq.lock, irqflags);
+	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
+		struct amdgpu_irq_src *src = adev->irq.sources[i];
+
+		if (!src || !src->funcs->set || !src->num_types)
+			continue;
+
+		for (j = 0; j < src->num_types; ++j) {
+			atomic_set(&src->enabled_types[j], 0);
+			r = src->funcs->set(adev, src, j,
+					    AMDGPU_IRQ_STATE_DISABLE);
+			if (r)
+				DRM_ERROR("error disabling interrupt (%d)\n",
+					  r);
+		}
+	}
+	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
+}
+
+/**
+ * amdgpu_irq_preinstall - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
+void amdgpu_irq_preinstall(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	/* Disable *all* interrupts */
+	amdgpu_irq_disable_all(adev);
+	/* Clear bits */
+	amdgpu_ih_process(adev);
+}
+
+/**
+ * amdgpu_irq_postinstall - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles stuff to be done after enabling irqs (all asics).
+ * Returns 0 on success.
+ */
+int amdgpu_irq_postinstall(struct drm_device *dev)
+{
+	dev->max_vblank_count = 0x001fffff;
+	return 0;
+}
+
+/**
+ * amdgpu_irq_uninstall - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
+void amdgpu_irq_uninstall(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (adev == NULL) {
+		return;
+	}
+	amdgpu_irq_disable_all(adev);
+}
+
+/**
+ * amdgpu_irq_handler - irq handler
+ *
+ * @irq: interrupt number
+ * @arg: the drm device pointer (struct drm_device *)
+ *
+ * This is the irq handler for the amdgpu driver (all asics).
+ */
+irqreturn_t amdgpu_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct amdgpu_device *adev = dev->dev_private;
+	irqreturn_t ret;
+
+	ret = amdgpu_ih_process(adev);
+	if (ret == IRQ_HANDLED)
+		pm_runtime_mark_last_busy(dev->dev);
+	return ret;
+}
+
+/**
+ * amdgpu_msi_ok - asic specific msi checks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
+static bool amdgpu_msi_ok(struct amdgpu_device *adev)
+{
+	/* force MSI on */
+	if (amdgpu_msi == 1)
+		return true;
+	else if (amdgpu_msi == 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * amdgpu_irq_init - init driver interrupt info
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_irq_init(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	spin_lock_init(&adev->irq.lock);
+	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+	if (r) {
+		return r;
+	}
+	/* enable msi */
+	adev->irq.msi_enabled = false;
+
+	if (amdgpu_msi_ok(adev)) {
+		int ret = pci_enable_msi(adev->pdev);
+		if (!ret) {
+			adev->irq.msi_enabled = true;
+			dev_info(adev->dev, "amdgpu: using MSI.\n");
+		}
+	}
+
+	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
+
+	adev->irq.installed = true;
+	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
+	if (r) {
+		adev->irq.installed = false;
+		flush_work(&adev->hotplug_work);
+		return r;
+	}
+
+	DRM_INFO("amdgpu: irq initialized.\n");
+	return 0;
+}
+
+/**
+ * amdgpu_irq_fini - tear down driver interrupt info
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
+void amdgpu_irq_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	drm_vblank_cleanup(adev->ddev);
+	if (adev->irq.installed) {
+		drm_irq_uninstall(adev->ddev);
+		adev->irq.installed = false;
+		if (adev->irq.msi_enabled)
+			pci_disable_msi(adev->pdev);
+		flush_work(&adev->hotplug_work);
+	}
+
+	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
+		struct amdgpu_irq_src *src = adev->irq.sources[i];
+
+		if (!src)
+			continue;
+
+		kfree(src->enabled_types);
+		src->enabled_types = NULL;
+	}
+}
+
+/**
+ * amdgpu_irq_add_id - register irq source
+ *
+ * @adev: amdgpu device pointer
+ * @src_id: source id for this source
+ * @source: irq source
+ *
+ */
+int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+		      struct amdgpu_irq_src *source)
+{
+	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
+		return -EINVAL;
+
+	if (adev->irq.sources[src_id] != NULL)
+		return -EINVAL;
+
+	if (!source->funcs)
+		return -EINVAL;
+
+	if (source->num_types && !source->enabled_types) {
+		atomic_t *types;
+
+		types = kcalloc(source->num_types, sizeof(atomic_t),
+				GFP_KERNEL);
+		if (!types)
+			return -ENOMEM;
+
+		source->enabled_types = types;
+	}
+
+	adev->irq.sources[src_id] = source;
+	return 0;
+}
+
+/**
+ * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ *
+ * @adev: amdgpu device pointer
+ * @entry: interrupt vector
+ *
+ * Dispatches the irq to the different IP blocks
+ */
+void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+			 struct amdgpu_iv_entry *entry)
+{
+	unsigned src_id = entry->src_id;
+	struct amdgpu_irq_src *src;
+	int r;
+
+	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+		return;
+	}
+
+	src = adev->irq.sources[src_id];
+	if (!src) {
+		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
+		return;
+	}
+
+	r = src->funcs->process(adev, src, entry);
+	if (r)
+		DRM_ERROR("error processing interrupt (%d)\n", r);
+}
+
+/**
+ * amdgpu_irq_update - update hw interrupt state
+ *
+ * @adev: amdgpu device pointer
+ * @src: interrupt src you want to enable
+ * @type: type of interrupt you want to update
+ *
+ * Updates the interrupt state for a specific src (all asics).
+ */
+int amdgpu_irq_update(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *src, unsigned type)
+{
+	unsigned long irqflags;
+	enum amdgpu_interrupt_state state;
+	int r;
+
+	spin_lock_irqsave(&adev->irq.lock, irqflags);
+
+	/* we need to determine the state after taking the lock, otherwise
+	 * we might disable an interrupt that was just enabled */
+	if (amdgpu_irq_enabled(adev, src, type))
+		state = AMDGPU_IRQ_STATE_ENABLE;
+	else
+		state = AMDGPU_IRQ_STATE_DISABLE;
+
+	r = src->funcs->set(adev, src, type, state);
+	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
+	return r;
+}
+
+/**
+ * amdgpu_irq_get - enable interrupt
+ *
+ * @adev: amdgpu device pointer
+ * @src: interrupt src you want to enable
+ * @type: type of interrupt you want to enable
+ *
+ * Enables the interrupt type for a specific src (all asics).
+ */
+int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+		   unsigned type)
+{
+	if (!adev->ddev->irq_enabled)
+		return -ENOENT;
+
+	if (type >= src->num_types)
+		return -EINVAL;
+
+	if (!src->enabled_types || !src->funcs->set)
+		return -EINVAL;
+
+	if (atomic_inc_return(&src->enabled_types[type]) == 1)
+		return amdgpu_irq_update(adev, src, type);
+
+	return 0;
+}
+
+bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
+			struct amdgpu_irq_src *src,
+			unsigned type)
+{
+	if ((type >= src->num_types) || !src->enabled_types)
+		return false;
+	return atomic_inc_return(&src->enabled_types[type]) == 1;
+}
+
+/**
+ * amdgpu_irq_put - disable interrupt
+ *
+ * @adev: amdgpu device pointer
+ * @src: interrupt src you want to disable
+ * @type: type of interrupt you want to disable
+ *
+ * Disables the interrupt type for a specific src (all asics).
+ */
+int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+		   unsigned type)
+{
+	if (!adev->ddev->irq_enabled)
+		return -ENOENT;
+
+	if (type >= src->num_types)
+		return -EINVAL;
+
+	if (!src->enabled_types || !src->funcs->set)
+		return -EINVAL;
+
+	if (atomic_dec_and_test(&src->enabled_types[type]))
+		return amdgpu_irq_update(adev, src, type);
+
+	return 0;
+}
+
+/**
+ * amdgpu_irq_enabled - test if irq is enabled or not
+ *
+ * @adev: amdgpu device pointer
+ * @src: interrupt source to test
+ * @type: type of interrupt to test
+ *
+ * Tests if the given interrupt source is enabled or not
+ */
+bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+			unsigned type)
+{
+	if (!adev->ddev->irq_enabled)
+		return false;
+
+	if (type >= src->num_types)
+		return false;
+
+	if (!src->enabled_types || !src->funcs->set)
+		return false;
+
+	return !!atomic_read(&src->enabled_types[type]);
+}
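Putting amdgpu_irq_add_id(), amdgpu_irq_get() and amdgpu_irq_put() together: an interrupt-generating IP block registers one amdgpu_irq_src per source id and then enables or disables individual types through the per-type refcount. A hedged sketch with hypothetical names (my_irq_*, MY_SRC_ID); the real users are the display, GFX and SDMA blocks.

/* Hedged sketch, not part of this patch: registering an irq source. */
#define MY_SRC_ID	42			/* placeholder source id */

static int my_irq_set_state(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *src,
			    unsigned type,
			    enum amdgpu_interrupt_state state)
{
	/* program the hw enable bit for 'type' according to 'state' */
	return 0;
}

static int my_irq_process(struct amdgpu_device *adev,
			  struct amdgpu_irq_src *src,
			  struct amdgpu_iv_entry *entry)
{
	/* ack the interrupt, schedule work, signal fences, ... */
	return 0;
}

static const struct amdgpu_irq_src_funcs my_irq_funcs = {
	.set = my_irq_set_state,
	.process = my_irq_process,
};

static struct amdgpu_irq_src my_irq = {
	.num_types = 1,
	.funcs = &my_irq_funcs,
};

static int my_block_sw_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, MY_SRC_ID, &my_irq);
	if (r)
		return r;

	/* refcounted enable; a matching amdgpu_irq_put() disables again */
	return amdgpu_irq_get(adev, &my_irq, 0);
}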

+ 92 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h

@@ -0,0 +1,92 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IRQ_H__
+#define __AMDGPU_IRQ_H__
+
+#include "amdgpu_ih.h"
+
+#define AMDGPU_MAX_IRQ_SRC_ID	0x100
+
+struct amdgpu_device;
+struct amdgpu_iv_entry;
+
+enum amdgpu_interrupt_state {
+	AMDGPU_IRQ_STATE_DISABLE,
+	AMDGPU_IRQ_STATE_ENABLE,
+};
+
+struct amdgpu_irq_src {
+	unsigned				num_types;
+	atomic_t				*enabled_types;
+	const struct amdgpu_irq_src_funcs	*funcs;
+};
+
+/* provided by interrupt generating IP blocks */
+struct amdgpu_irq_src_funcs {
+	int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+		   unsigned type, enum amdgpu_interrupt_state state);
+
+	int (*process)(struct amdgpu_device *adev,
+		       struct amdgpu_irq_src *source,
+		       struct amdgpu_iv_entry *entry);
+};
+
+struct amdgpu_irq {
+	bool				installed;
+	spinlock_t			lock;
+	/* interrupt sources */
+	struct amdgpu_irq_src		*sources[AMDGPU_MAX_IRQ_SRC_ID];
+
+	/* status, etc. */
+	bool				msi_enabled; /* msi enabled */
+
+	/* interrupt ring */
+	struct amdgpu_ih_ring		ih;
+	const struct amdgpu_ih_funcs	*ih_funcs;
+};
+
+void amdgpu_irq_preinstall(struct drm_device *dev);
+int amdgpu_irq_postinstall(struct drm_device *dev);
+void amdgpu_irq_uninstall(struct drm_device *dev);
+irqreturn_t amdgpu_irq_handler(int irq, void *arg);
+
+int amdgpu_irq_init(struct amdgpu_device *adev);
+void amdgpu_irq_fini(struct amdgpu_device *adev);
+int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+		      struct amdgpu_irq_src *source);
+void amdgpu_irq_dispatch(struct amdgpu_device *adev,
+			 struct amdgpu_iv_entry *entry);
+int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+		      unsigned type);
+int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+		   unsigned type);
+bool amdgpu_irq_get_delayed(struct amdgpu_device *adev,
+			    struct amdgpu_irq_src *src,
+			    unsigned type);
+int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+		   unsigned type);
+bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+			unsigned type);
+
+#endif

+ 697 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -0,0 +1,697 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <drm/amdgpu_drm.h>
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx(void);
+#else
+static inline bool amdgpu_has_atpx(void) { return false; }
+#endif
+
+/**
+ * amdgpu_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * Returns 0 on success.
+ */
+int amdgpu_driver_unload_kms(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (adev == NULL)
+		return 0;
+
+	if (adev->rmmio == NULL)
+		goto done_free;
+
+	pm_runtime_get_sync(dev->dev);
+
+	amdgpu_acpi_fini(adev);
+
+	amdgpu_device_fini(adev);
+
+done_free:
+	kfree(adev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+/**
+ * amdgpu_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+	struct amdgpu_device *adev;
+	int r, acpi_status;
+
+	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
+	if (adev == NULL) {
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)adev;
+
+	if ((amdgpu_runtime_pm != 0) &&
+	    amdgpu_has_atpx() &&
+	    ((flags & AMDGPU_IS_APU) == 0))
+		flags |= AMDGPU_IS_PX;
+
+	/* amdgpu_device_init should report only fatal error
+	 * like memory allocation failure or iomapping failure,
+	 * or memory manager initialization failure, it must
+	 * properly initialize the GPU MC controller and permit
+	 * VRAM allocation
+	 */
+	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
+	}
+
+	/* Call ACPI methods: require modeset init
+	 * but failure is not fatal
+	 */
+	if (!r) {
+		acpi_status = amdgpu_acpi_init(adev);
+		if (acpi_status)
+			dev_dbg(&dev->pdev->dev,
+				"Error during ACPI methods call\n");
+	}
+
+	if (amdgpu_device_is_px(dev)) {
+		pm_runtime_use_autosuspend(dev->dev);
+		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+		pm_runtime_set_active(dev->dev);
+		pm_runtime_allow(dev->dev);
+		pm_runtime_mark_last_busy(dev->dev);
+		pm_runtime_put_autosuspend(dev->dev);
+	}
+
+out:
+	if (r)
+		amdgpu_driver_unload_kms(dev);
+
+
+	return r;
+}
+
+/*
+ * Userspace get information ioctl
+ */
+/**
+ * amdgpu_info_ioctl - answer a device specific request.
+ *
+ * @dev: drm device pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers.  Examples include: pci device id, pipeline params, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_amdgpu_info *info = data;
+	struct amdgpu_mode_info *minfo = &adev->mode_info;
+	void __user *out = (void __user *)(long)info->return_pointer;
+	uint32_t size = info->return_size;
+	struct drm_crtc *crtc;
+	uint32_t ui32 = 0;
+	uint64_t ui64 = 0;
+	int i, found;
+
+	if (!info->return_size || !info->return_pointer)
+		return -EINVAL;
+
+	switch (info->query) {
+	case AMDGPU_INFO_ACCEL_WORKING:
+		ui32 = adev->accel_working;
+		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_CRTC_FROM_ID:
+		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
+			crtc = (struct drm_crtc *)minfo->crtcs[i];
+			if (crtc && crtc->base.id == info->mode_crtc.id) {
+				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+				ui32 = amdgpu_crtc->crtc_id;
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
+			return -EINVAL;
+		}
+		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_HW_IP_INFO: {
+		struct drm_amdgpu_info_hw_ip ip = {};
+		enum amd_ip_block_type type;
+		uint32_t ring_mask = 0;
+		uint32_t ib_start_alignment = 0;
+		uint32_t ib_size_alignment = 0;
+
+		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+			return -EINVAL;
+
+		switch (info->query_hw_ip.type) {
+		case AMDGPU_HW_IP_GFX:
+			type = AMD_IP_BLOCK_TYPE_GFX;
+			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 8;
+			break;
+		case AMDGPU_HW_IP_COMPUTE:
+			type = AMD_IP_BLOCK_TYPE_GFX;
+			for (i = 0; i < adev->gfx.num_compute_rings; i++)
+				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 8;
+			break;
+		case AMDGPU_HW_IP_DMA:
+			type = AMD_IP_BLOCK_TYPE_SDMA;
+			ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
+			ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 1;
+			break;
+		case AMDGPU_HW_IP_UVD:
+			type = AMD_IP_BLOCK_TYPE_UVD;
+			ring_mask = adev->uvd.ring.ready ? 1 : 0;
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 8;
+			break;
+		case AMDGPU_HW_IP_VCE:
+			type = AMD_IP_BLOCK_TYPE_VCE;
+			for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
+			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+			ib_size_alignment = 8;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		for (i = 0; i < adev->num_ip_blocks; i++) {
+			if (adev->ip_blocks[i].type == type &&
+			    adev->ip_block_enabled[i]) {
+				ip.hw_ip_version_major = adev->ip_blocks[i].major;
+				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+				ip.capabilities_flags = 0;
+				ip.available_rings = ring_mask;
+				ip.ib_start_alignment = ib_start_alignment;
+				ip.ib_size_alignment = ib_size_alignment;
+				break;
+			}
+		}
+		return copy_to_user(out, &ip,
+				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_HW_IP_COUNT: {
+		enum amd_ip_block_type type;
+		uint32_t count = 0;
+
+		switch (info->query_hw_ip.type) {
+		case AMDGPU_HW_IP_GFX:
+			type = AMD_IP_BLOCK_TYPE_GFX;
+			break;
+		case AMDGPU_HW_IP_COMPUTE:
+			type = AMD_IP_BLOCK_TYPE_GFX;
+			break;
+		case AMDGPU_HW_IP_DMA:
+			type = AMD_IP_BLOCK_TYPE_SDMA;
+			break;
+		case AMDGPU_HW_IP_UVD:
+			type = AMD_IP_BLOCK_TYPE_UVD;
+			break;
+		case AMDGPU_HW_IP_VCE:
+			type = AMD_IP_BLOCK_TYPE_VCE;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		for (i = 0; i < adev->num_ip_blocks; i++)
+			if (adev->ip_blocks[i].type == type &&
+			    adev->ip_block_enabled[i] &&
+			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+				count++;
+
+		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_TIMESTAMP:
+		ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_FW_VERSION: {
+		struct drm_amdgpu_info_firmware fw_info;
+
+		/* We only support one instance of each IP block right now. */
+		if (info->query_fw.ip_instance != 0)
+			return -EINVAL;
+
+		switch (info->query_fw.fw_type) {
+		case AMDGPU_INFO_FW_VCE:
+			fw_info.ver = adev->vce.fw_version;
+			fw_info.feature = adev->vce.fb_version;
+			break;
+		case AMDGPU_INFO_FW_UVD:
+			fw_info.ver = 0;
+			fw_info.feature = 0;
+			break;
+		case AMDGPU_INFO_FW_GMC:
+			fw_info.ver = adev->mc.fw_version;
+			fw_info.feature = 0;
+			break;
+		case AMDGPU_INFO_FW_GFX_ME:
+			fw_info.ver = adev->gfx.me_fw_version;
+			fw_info.feature = adev->gfx.me_feature_version;
+			break;
+		case AMDGPU_INFO_FW_GFX_PFP:
+			fw_info.ver = adev->gfx.pfp_fw_version;
+			fw_info.feature = adev->gfx.pfp_feature_version;
+			break;
+		case AMDGPU_INFO_FW_GFX_CE:
+			fw_info.ver = adev->gfx.ce_fw_version;
+			fw_info.feature = adev->gfx.ce_feature_version;
+			break;
+		case AMDGPU_INFO_FW_GFX_RLC:
+			fw_info.ver = adev->gfx.rlc_fw_version;
+			fw_info.feature = 0;
+			break;
+		case AMDGPU_INFO_FW_GFX_MEC:
+			if (info->query_fw.index == 0)
+				fw_info.ver = adev->gfx.mec_fw_version;
+			else if (info->query_fw.index == 1)
+				fw_info.ver = adev->gfx.mec2_fw_version;
+			else
+				return -EINVAL;
+			fw_info.feature = 0;
+			break;
+		case AMDGPU_INFO_FW_SMC:
+			fw_info.ver = adev->pm.fw_version;
+			fw_info.feature = 0;
+			break;
+		case AMDGPU_INFO_FW_SDMA:
+			if (info->query_fw.index >= 2)
+				return -EINVAL;
+			fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
+			fw_info.feature = 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return copy_to_user(out, &fw_info,
+				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_NUM_BYTES_MOVED:
+		ui64 = atomic64_read(&adev->num_bytes_moved);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_VRAM_USAGE:
+		ui64 = atomic64_read(&adev->vram_usage);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_VIS_VRAM_USAGE:
+		ui64 = atomic64_read(&adev->vram_vis_usage);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_GTT_USAGE:
+		ui64 = atomic64_read(&adev->gtt_usage);
+		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_GDS_CONFIG: {
+		struct drm_amdgpu_info_gds gds_info;
+
+		memset(&gds_info, 0, sizeof(gds_info));
+		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
+		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
+		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
+		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
+		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
+		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
+		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
+		return copy_to_user(out, &gds_info,
+				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_VRAM_GTT: {
+		struct drm_amdgpu_info_vram_gtt vram_gtt;
+
+		vram_gtt.vram_size = adev->mc.real_vram_size;
+		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
+		vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+		vram_gtt.gtt_size  = adev->mc.gtt_size;
+		vram_gtt.gtt_size -= adev->gart_pin_size;
+		return copy_to_user(out, &vram_gtt,
+				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_READ_MMR_REG: {
+		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
+		uint32_t *regs;
+		unsigned se_num = (info->read_mmr_reg.instance >>
+				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
+				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
+		unsigned sh_num = (info->read_mmr_reg.instance >>
+				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
+				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
+
+		/* set full masks if the userspace set all bits
+		 * in the bitfields */
+		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+			se_num = 0xffffffff;
+		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+			sh_num = 0xffffffff;
+
+		regs = kmalloc(alloc_size, GFP_KERNEL);
+		if (!regs)
+			return -ENOMEM;
+
+		for (i = 0; i < info->read_mmr_reg.count; i++)
+			if (amdgpu_asic_read_register(adev, se_num, sh_num,
+						      info->read_mmr_reg.dword_offset + i,
+						      &regs[i])) {
+				DRM_DEBUG_KMS("unallowed offset %#x\n",
+					      info->read_mmr_reg.dword_offset + i);
+				kfree(regs);
+				return -EFAULT;
+			}
+		n = copy_to_user(out, regs, min(size, alloc_size));
+		kfree(regs);
+		return n ? -EFAULT : 0;
+	}
+	case AMDGPU_INFO_DEV_INFO: {
+		struct drm_amdgpu_info_device dev_info;
+		struct amdgpu_cu_info cu_info;
+
+		dev_info.device_id = dev->pdev->device;
+		dev_info.chip_rev = adev->rev_id;
+		dev_info.external_rev = adev->external_rev_id;
+		dev_info.pci_rev = dev->pdev->revision;
+		dev_info.family = adev->family;
+		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
+		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
+		/* return all clocks in KHz */
+		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
+		if (adev->pm.dpm_enabled) {
+			dev_info.max_engine_clock =
+				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
+			dev_info.max_memory_clock =
+				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
+		} else {
+			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
+			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
+		}
+		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
+		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+					adev->gfx.config.max_shader_engines;
+		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
+		dev_info._pad = 0;
+		dev_info.ids_flags = 0;
+		if (adev->flags & AMDGPU_IS_APU)
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
+		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
+					     AMDGPU_GPU_PAGE_SIZE;
+		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+
+		amdgpu_asic_get_cu_info(adev, &cu_info);
+		dev_info.cu_active_number = cu_info.number;
+		dev_info.cu_ao_mask = cu_info.ao_cu_mask;
+		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
+		memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
+		dev_info.vram_type = adev->mc.vram_type;
+		dev_info.vram_bit_width = adev->mc.vram_width;
+
+		return copy_to_user(out, &dev_info,
+				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
+	}
+	default:
+		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+/*
+ * Outdated mess for old drm with Xorg being in charge (void function now).
+ */
+/**
+ * amdgpu_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga switcheroo state after last close (all asics).
+ */
+void amdgpu_driver_lastclose_kms(struct drm_device *dev)
+{
+	vga_switcheroo_process_delayed_switch();
+}
+
+/**
+ * amdgpu_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init the per-file vm and context manager (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv;
+	int r;
+
+	file_priv->driver_priv = NULL;
+
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
+
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (unlikely(!fpriv))
+		return -ENOMEM;
+
+	r = amdgpu_vm_init(adev, &fpriv->vm);
+	if (r)
+		goto error_free;
+
+	mutex_init(&fpriv->bo_list_lock);
+	idr_init(&fpriv->bo_list_handles);
+
+	/* init context manager */
+	mutex_init(&fpriv->ctx_mgr.lock);
+	idr_init(&fpriv->ctx_mgr.ctx_handles);
+	fpriv->ctx_mgr.adev = adev;
+
+	file_priv->driver_priv = fpriv;
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return 0;
+
+error_free:
+	kfree(fpriv);
+
+	return r;
+}
+
+/**
+ * amdgpu_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device post close, tear down the per-file vm and driver state (all asics).
+ */
+void amdgpu_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+	struct amdgpu_bo_list *list;
+	int handle;
+
+	if (!fpriv)
+		return;
+
+	amdgpu_vm_fini(adev, &fpriv->vm);
+
+	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
+		amdgpu_bo_list_free(list);
+
+	idr_destroy(&fpriv->bo_list_handles);
+	mutex_destroy(&fpriv->bo_list_lock);
+
+	/* release context */
+	amdgpu_ctx_fini(fpriv);
+
+	kfree(fpriv);
+	file_priv->driver_priv = NULL;
+}
+
+/**
+ * amdgpu_driver_preclose_kms - drm callback for pre close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device pre close, free the UVD and VCE handles owned by
+ * the file (all asics).
+ */
+void amdgpu_driver_preclose_kms(struct drm_device *dev,
+				struct drm_file *file_priv)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_uvd_free_handles(adev, file_priv);
+	amdgpu_vce_free_handles(adev, file_priv);
+}
+
+/*
+ * VBlank related functions.
+ */
+/**
+ * amdgpu_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	return amdgpu_display_vblank_get_counter(adev, crtc);
+}
+
+/**
+ * amdgpu_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
+}
+
+/**
+ * amdgpu_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
+void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
+	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
+}
+
+/**
+ * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the timestamp for
+ * @max_error: max error
+ * @vblank_time: time value
+ * @flags: flags passed to the driver
+ *
+ * Gets the timestamp on the requested crtc based on the
+ * scanout position.  (all asics).
+ * Returns positive status flags on success, negative error on failure.
+ */
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get associated drm_crtc: */
+	drmcrtc = &adev->mode_info.crtcs[crtc]->base;
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags,
+						     drmcrtc, &drmcrtc->hwmode);
+}
+
+const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	/* KMS */
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+};
+int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
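The AMDGPU_INFO queries handled by amdgpu_info_ioctl() are reached from userspace through DRM_IOCTL_AMDGPU_INFO with a struct drm_amdgpu_info request. A hedged userspace sketch, assuming the installed amdgpu_drm.h exposes the same field and query names used above and that fd is an already opened render node:

/* Hedged sketch: query AMDGPU_INFO_ACCEL_WORKING from userspace. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>		/* header path may differ per system */

static int query_accel_working(int fd, uint32_t *accel_working)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)accel_working;
	request.return_size = sizeof(*accel_working);
	request.query = AMDGPU_INFO_ACCEL_WORKING;

	/* returns 0 on success; *accel_working is then 0 or 1 */
	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}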

+ 322 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

@@ -0,0 +1,322 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/mmu_notifier.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "amdgpu.h"
+
+struct amdgpu_mn {
+	/* constant after initialisation */
+	struct amdgpu_device	*adev;
+	struct mm_struct	*mm;
+	struct mmu_notifier	mn;
+
+	/* only used on destruction */
+	struct work_struct	work;
+
+	/* protected by adev->mn_lock */
+	struct hlist_node	node;
+
+	/* objects protected by lock */
+	struct mutex		lock;
+	struct rb_root		objects;
+};
+
+struct amdgpu_mn_node {
+	struct interval_tree_node	it;
+	struct list_head		bos;
+};
+
+/**
+ * amdgpu_mn_destroy - destroy the rmn
+ *
+ * @work: previously scheduled work item
+ *
+ * Lazily destroys the notifier from a work item.
+ */
+static void amdgpu_mn_destroy(struct work_struct *work)
+{
+	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
+	struct amdgpu_device *adev = rmn->adev;
+	struct amdgpu_mn_node *node, *next_node;
+	struct amdgpu_bo *bo, *next_bo;
+
+	mutex_lock(&adev->mn_lock);
+	mutex_lock(&rmn->lock);
+	hash_del(&rmn->node);
+	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
+					     it.rb) {
+
+		interval_tree_remove(&node->it, &rmn->objects);
+		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+			bo->mn = NULL;
+			list_del_init(&bo->mn_list);
+		}
+		kfree(node);
+	}
+	mutex_unlock(&rmn->lock);
+	mutex_unlock(&adev->mn_lock);
+	mmu_notifier_unregister(&rmn->mn, rmn->mm);
+	kfree(rmn);
+}
+
+/**
+ * amdgpu_mn_release - callback to notify about mm destruction
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ *
+ * Schedule a work item to lazily destroy our notifier.
+ */
+static void amdgpu_mn_release(struct mmu_notifier *mn,
+			      struct mm_struct *mm)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
+	schedule_work(&rmn->work);
+}
+
+/**
+ * amdgpu_mn_invalidate_range_start - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * We block until all BOs between start and end are idle and then
+ * unmap them by moving them back into the system domain.
+ */
+static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
+					     struct mm_struct *mm,
+					     unsigned long start,
+					     unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	/* notification is exclusive, but interval is inclusive */
+	end -= 1;
+
+	mutex_lock(&rmn->lock);
+
+	it = interval_tree_iter_first(&rmn->objects, start, end);
+	while (it) {
+		struct amdgpu_mn_node *node;
+		struct amdgpu_bo *bo;
+		long r;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		it = interval_tree_iter_next(it, start, end);
+
+		list_for_each_entry(bo, &node->bos, mn_list) {
+
+			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+				continue;
+
+			r = amdgpu_bo_reserve(bo, true);
+			if (r) {
+				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+				continue;
+			}
+
+			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+				true, false, MAX_SCHEDULE_TIMEOUT);
+			if (r <= 0)
+				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+			if (r)
+				DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+			amdgpu_bo_unreserve(bo);
+		}
+	}
+
+	mutex_unlock(&rmn->lock);
+}
+
+static const struct mmu_notifier_ops amdgpu_mn_ops = {
+	.release = amdgpu_mn_release,
+	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+};
+
+/**
+ * amdgpu_mn_get - create notifier context
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Creates a notifier context for current->mm.
+ */
+static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+{
+	struct mm_struct *mm = current->mm;
+	struct amdgpu_mn *rmn;
+	int r;
+
+	down_write(&mm->mmap_sem);
+	mutex_lock(&adev->mn_lock);
+
+	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
+		if (rmn->mm == mm)
+			goto release_locks;
+
+	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
+	if (!rmn) {
+		rmn = ERR_PTR(-ENOMEM);
+		goto release_locks;
+	}
+
+	rmn->adev = adev;
+	rmn->mm = mm;
+	rmn->mn.ops = &amdgpu_mn_ops;
+	mutex_init(&rmn->lock);
+	rmn->objects = RB_ROOT;
+
+	r = __mmu_notifier_register(&rmn->mn, mm);
+	if (r)
+		goto free_rmn;
+
+	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
+
+release_locks:
+	mutex_unlock(&adev->mn_lock);
+	up_write(&mm->mmap_sem);
+
+	return rmn;
+
+free_rmn:
+	mutex_unlock(&adev->mn_lock);
+	up_write(&mm->mmap_sem);
+	kfree(rmn);
+
+	return ERR_PTR(r);
+}
+
+/**
+ * amdgpu_mn_register - register a BO for notifier updates
+ *
+ * @bo: amdgpu buffer object
+ * @addr: userptr addr we should monitor
+ *
+ * Registers an MMU notifier for the given BO at the specified address.
+ * Returns 0 on success, -ERRNO if anything goes wrong.
+ */
+int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
+	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_mn *rmn;
+	struct amdgpu_mn_node *node = NULL;
+	struct list_head bos;
+	struct interval_tree_node *it;
+
+	rmn = amdgpu_mn_get(adev);
+	if (IS_ERR(rmn))
+		return PTR_ERR(rmn);
+
+	INIT_LIST_HEAD(&bos);
+
+	mutex_lock(&rmn->lock);
+
+	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+		kfree(node);
+		node = container_of(it, struct amdgpu_mn_node, it);
+		interval_tree_remove(&node->it, &rmn->objects);
+		addr = min(it->start, addr);
+		end = max(it->last, end);
+		list_splice(&node->bos, &bos);
+	}
+
+	if (!node) {
+		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
+		if (!node) {
+			mutex_unlock(&rmn->lock);
+			return -ENOMEM;
+		}
+	}
+
+	bo->mn = rmn;
+
+	node->it.start = addr;
+	node->it.last = end;
+	INIT_LIST_HEAD(&node->bos);
+	list_splice(&bos, &node->bos);
+	list_add(&bo->mn_list, &node->bos);
+
+	interval_tree_insert(&node->it, &rmn->objects);
+
+	mutex_unlock(&rmn->lock);
+
+	return 0;
+}
+
+/**
+ * amdgpu_mn_unregister - unregister a BO for notifier updates
+ *
+ * @bo: amdgpu buffer object
+ *
+ * Remove any registration of MMU notifier updates from the buffer object.
+ */
+void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+{
+	struct amdgpu_device *adev = bo->adev;
+	struct amdgpu_mn *rmn;
+	struct list_head *head;
+
+	mutex_lock(&adev->mn_lock);
+	rmn = bo->mn;
+	if (rmn == NULL) {
+		mutex_unlock(&adev->mn_lock);
+		return;
+	}
+
+	mutex_lock(&rmn->lock);
+	/* save the next list entry for later */
+	head = bo->mn_list.next;
+
+	bo->mn = NULL;
+	list_del(&bo->mn_list);
+
+	if (list_empty(head)) {
+		struct amdgpu_mn_node *node;
+		node = container_of(head, struct amdgpu_mn_node, bos);
+		interval_tree_remove(&node->it, &rmn->objects);
+		kfree(node);
+	}
+
+	mutex_unlock(&rmn->lock);
+	mutex_unlock(&adev->mn_lock);
+}
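
To make the intended use of these helpers concrete, here is a minimal sketch of how a userptr BO would typically be tied to the notifier. The wrapper function name and error handling are hypothetical; only amdgpu_mn_register() and amdgpu_mn_unregister() correspond to the code above.

/* Illustrative sketch: register a userptr BO with the MMU notifier so
 * that CPU page-table changes on its user range invalidate the GPU
 * mapping, then undo the registration when it is no longer needed.
 */
static int example_setup_userptr(struct amdgpu_bo *bo, unsigned long addr)
{
	int r;

	r = amdgpu_mn_register(bo, addr);
	if (r) {
		DRM_ERROR("(%d) failed to register MMU notifier\n", r);
		return r;
	}

	/* ... submit work that uses the BO ... */

	/* drop the registration once the userptr mapping is torn down */
	amdgpu_mn_unregister(bo);
	return 0;
}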

+ 586 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

@@ -0,0 +1,586 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ *   Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#ifndef AMDGPU_MODE_H
+#define AMDGPU_MODE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct amdgpu_bo;
+struct amdgpu_device;
+struct amdgpu_encoder;
+struct amdgpu_router;
+struct amdgpu_hpd;
+
+#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
+#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
+#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
+#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+
+#define AMDGPU_MAX_HPD_PINS 6
+#define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_AFMT_BLOCKS 7
+
+enum amdgpu_rmx_type {
+	RMX_OFF,
+	RMX_FULL,
+	RMX_CENTER,
+	RMX_ASPECT
+};
+
+enum amdgpu_underscan_type {
+	UNDERSCAN_OFF,
+	UNDERSCAN_ON,
+	UNDERSCAN_AUTO,
+};
+
+#define AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS 50
+#define AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS 10
+
+enum amdgpu_hpd_id {
+	AMDGPU_HPD_1 = 0,
+	AMDGPU_HPD_2,
+	AMDGPU_HPD_3,
+	AMDGPU_HPD_4,
+	AMDGPU_HPD_5,
+	AMDGPU_HPD_6,
+	AMDGPU_HPD_LAST,
+	AMDGPU_HPD_NONE = 0xff,
+};
+
+enum amdgpu_crtc_irq {
+	AMDGPU_CRTC_IRQ_VBLANK1 = 0,
+	AMDGPU_CRTC_IRQ_VBLANK2,
+	AMDGPU_CRTC_IRQ_VBLANK3,
+	AMDGPU_CRTC_IRQ_VBLANK4,
+	AMDGPU_CRTC_IRQ_VBLANK5,
+	AMDGPU_CRTC_IRQ_VBLANK6,
+	AMDGPU_CRTC_IRQ_VLINE1,
+	AMDGPU_CRTC_IRQ_VLINE2,
+	AMDGPU_CRTC_IRQ_VLINE3,
+	AMDGPU_CRTC_IRQ_VLINE4,
+	AMDGPU_CRTC_IRQ_VLINE5,
+	AMDGPU_CRTC_IRQ_VLINE6,
+	AMDGPU_CRTC_IRQ_LAST,
+	AMDGPU_CRTC_IRQ_NONE = 0xff
+};
+
+enum amdgpu_pageflip_irq {
+	AMDGPU_PAGEFLIP_IRQ_D1 = 0,
+	AMDGPU_PAGEFLIP_IRQ_D2,
+	AMDGPU_PAGEFLIP_IRQ_D3,
+	AMDGPU_PAGEFLIP_IRQ_D4,
+	AMDGPU_PAGEFLIP_IRQ_D5,
+	AMDGPU_PAGEFLIP_IRQ_D6,
+	AMDGPU_PAGEFLIP_IRQ_LAST,
+	AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
+};
+
+enum amdgpu_flip_status {
+	AMDGPU_FLIP_NONE,
+	AMDGPU_FLIP_PENDING,
+	AMDGPU_FLIP_SUBMITTED
+};
+
+#define AMDGPU_MAX_I2C_BUS 16
+
+/* amdgpu gpio-based i2c
+ * 1. "mask" reg and bits
+ *    grabs the gpio pins for software use
+ *    0=not held  1=held
+ * 2. "a" reg and bits
+ *    output pin value
+ *    0=low 1=high
+ * 3. "en" reg and bits
+ *    sets the pin direction
+ *    0=input 1=output
+ * 4. "y" reg and bits
+ *    input pin value
+ *    0=low 1=high
+ */
+struct amdgpu_i2c_bus_rec {
+	bool valid;
+	/* id used by atom */
+	uint8_t i2c_id;
+	/* id used by atom */
+	enum amdgpu_hpd_id hpd;
+	/* can be used with hw i2c engine */
+	bool hw_capable;
+	/* uses multi-media i2c engine */
+	bool mm_i2c;
+	/* regs and bits */
+	uint32_t mask_clk_reg;
+	uint32_t mask_data_reg;
+	uint32_t a_clk_reg;
+	uint32_t a_data_reg;
+	uint32_t en_clk_reg;
+	uint32_t en_data_reg;
+	uint32_t y_clk_reg;
+	uint32_t y_data_reg;
+	uint32_t mask_clk_mask;
+	uint32_t mask_data_mask;
+	uint32_t a_clk_mask;
+	uint32_t a_data_mask;
+	uint32_t en_clk_mask;
+	uint32_t en_data_mask;
+	uint32_t y_clk_mask;
+	uint32_t y_data_mask;
+};
+
+#define AMDGPU_MAX_BIOS_CONNECTOR 16
+
+/* pll flags */
+#define AMDGPU_PLL_USE_BIOS_DIVS        (1 << 0)
+#define AMDGPU_PLL_NO_ODD_POST_DIV      (1 << 1)
+#define AMDGPU_PLL_USE_REF_DIV          (1 << 2)
+#define AMDGPU_PLL_LEGACY               (1 << 3)
+#define AMDGPU_PLL_PREFER_LOW_REF_DIV   (1 << 4)
+#define AMDGPU_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
+#define AMDGPU_PLL_PREFER_LOW_FB_DIV    (1 << 6)
+#define AMDGPU_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
+#define AMDGPU_PLL_PREFER_LOW_POST_DIV  (1 << 8)
+#define AMDGPU_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define AMDGPU_PLL_USE_FRAC_FB_DIV      (1 << 10)
+#define AMDGPU_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define AMDGPU_PLL_USE_POST_DIV         (1 << 12)
+#define AMDGPU_PLL_IS_LCD               (1 << 13)
+#define AMDGPU_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
+
+struct amdgpu_pll {
+	/* reference frequency */
+	uint32_t reference_freq;
+
+	/* fixed dividers */
+	uint32_t reference_div;
+	uint32_t post_div;
+
+	/* pll in/out limits */
+	uint32_t pll_in_min;
+	uint32_t pll_in_max;
+	uint32_t pll_out_min;
+	uint32_t pll_out_max;
+	uint32_t lcd_pll_out_min;
+	uint32_t lcd_pll_out_max;
+	uint32_t best_vco;
+
+	/* divider limits */
+	uint32_t min_ref_div;
+	uint32_t max_ref_div;
+	uint32_t min_post_div;
+	uint32_t max_post_div;
+	uint32_t min_feedback_div;
+	uint32_t max_feedback_div;
+	uint32_t min_frac_feedback_div;
+	uint32_t max_frac_feedback_div;
+
+	/* flags for the current clock */
+	uint32_t flags;
+
+	/* pll id */
+	uint32_t id;
+};
+
+struct amdgpu_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+	struct amdgpu_i2c_bus_rec rec;
+	struct drm_dp_aux aux;
+	bool has_aux;
+	struct mutex mutex;
+};
+
+struct amdgpu_fbdev;
+
+struct amdgpu_afmt {
+	bool enabled;
+	int offset;
+	bool last_buffer_filled_status;
+	int id;
+	struct amdgpu_audio_pin *pin;
+};
+
+/*
+ * Audio
+ */
+struct amdgpu_audio_pin {
+	int			channels;
+	int			rate;
+	int			bits_per_sample;
+	u8			status_bits;
+	u8			category_code;
+	u32			offset;
+	bool			connected;
+	u32			id;
+};
+
+struct amdgpu_audio {
+	bool enabled;
+	struct amdgpu_audio_pin pin[AMDGPU_MAX_AFMT_BLOCKS];
+	int num_pins;
+};
+
+struct amdgpu_mode_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[AMDGPU_MAX_CRTCS];
+};
+
+struct amdgpu_display_funcs {
+	/* vga render */
+	void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
+	/* display watermarks */
+	void (*bandwidth_update)(struct amdgpu_device *adev);
+	/* get frame count */
+	u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
+	/* wait for vblank */
+	void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
+	/* is dce hung */
+	bool (*is_display_hung)(struct amdgpu_device *adev);
+	/* set backlight level */
+	void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
+				    u8 level);
+	/* get backlight level */
+	u8 (*backlight_get_level)(struct amdgpu_encoder *amdgpu_encoder);
+	/* hotplug detect */
+	bool (*hpd_sense)(struct amdgpu_device *adev, enum amdgpu_hpd_id hpd);
+	void (*hpd_set_polarity)(struct amdgpu_device *adev,
+				 enum amdgpu_hpd_id hpd);
+	u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
+	/* pageflipping */
+	void (*page_flip)(struct amdgpu_device *adev,
+			 int crtc_id, u64 crtc_base);
+	int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
+					u32 *vbl, u32 *position);
+	/* display topology setup */
+	void (*add_encoder)(struct amdgpu_device *adev,
+			    uint32_t encoder_enum,
+			    uint32_t supported_device,
+			    u16 caps);
+	void (*add_connector)(struct amdgpu_device *adev,
+			      uint32_t connector_id,
+			      uint32_t supported_device,
+			      int connector_type,
+			      struct amdgpu_i2c_bus_rec *i2c_bus,
+			      uint16_t connector_object_id,
+			      struct amdgpu_hpd *hpd,
+			      struct amdgpu_router *router);
+	void (*stop_mc_access)(struct amdgpu_device *adev,
+			       struct amdgpu_mode_mc_save *save);
+	void (*resume_mc_access)(struct amdgpu_device *adev,
+				 struct amdgpu_mode_mc_save *save);
+};
+
+struct amdgpu_mode_info {
+	struct atom_context *atom_context;
+	struct card_info *atom_card_info;
+	bool mode_config_initialized;
+	struct amdgpu_crtc *crtcs[6];
+	struct amdgpu_afmt *afmt[7];
+	/* DVI-I properties */
+	struct drm_property *coherent_mode_property;
+	/* DAC enable load detect */
+	struct drm_property *load_detect_property;
+	/* underscan */
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
+	/* audio */
+	struct drm_property *audio_property;
+	/* FMT dithering */
+	struct drm_property *dither_property;
+	/* hardcoded DFP edid from BIOS */
+	struct edid *bios_hardcoded_edid;
+	int bios_hardcoded_edid_size;
+
+	/* pointer to fbdev info structure */
+	struct amdgpu_fbdev *rfbdev;
+	/* firmware flags */
+	u16 firmware_flags;
+	/* pointer to backlight encoder */
+	struct amdgpu_encoder *bl_encoder;
+	struct amdgpu_audio	audio; /* audio stuff */
+	int			num_crtc; /* number of crtcs */
+	int			num_hpd; /* number of hpd pins */
+	int			num_dig; /* number of dig blocks */
+	int			disp_priority;
+	const struct amdgpu_display_funcs *funcs;
+};
+
+#define AMDGPU_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct amdgpu_backlight_privdata {
+	struct amdgpu_encoder *encoder;
+	uint8_t negative;
+};
+
+#endif
+
+struct amdgpu_atom_ss {
+	uint16_t percentage;
+	uint16_t percentage_divider;
+	uint8_t type;
+	uint16_t step;
+	uint8_t delay;
+	uint8_t range;
+	uint8_t refdiv;
+	/* asic_ss */
+	uint16_t rate;
+	uint16_t amount;
+};
+
+struct amdgpu_crtc {
+	struct drm_crtc base;
+	int crtc_id;
+	u16 lut_r[256], lut_g[256], lut_b[256];
+	bool enabled;
+	bool can_tile;
+	uint32_t crtc_offset;
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_width;
+	int cursor_height;
+	int max_cursor_width;
+	int max_cursor_height;
+	enum amdgpu_rmx_type rmx_type;
+	u8 h_border;
+	u8 v_border;
+	fixed20_12 vsc;
+	fixed20_12 hsc;
+	struct drm_display_mode native_mode;
+	u32 pll_id;
+	/* page flipping */
+	struct workqueue_struct *pflip_queue;
+	struct amdgpu_flip_work *pflip_works;
+	enum amdgpu_flip_status pflip_status;
+	int deferred_flip_completion;
+	/* pll sharing */
+	struct amdgpu_atom_ss ss;
+	bool ss_enabled;
+	u32 adjusted_clock;
+	int bpc;
+	u32 pll_reference_div;
+	u32 pll_post_div;
+	u32 pll_flags;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	/* for dpm */
+	u32 line_time;
+	u32 wm_low;
+	u32 wm_high;
+	struct drm_display_mode hw_mode;
+};
+
+struct amdgpu_encoder_atom_dig {
+	bool linkb;
+	/* atom dig */
+	bool coherent_mode;
+	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+	/* atom lvds/edp */
+	uint32_t lcd_misc;
+	uint16_t panel_pwr_delay;
+	uint32_t lcd_ss_id;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int dpms_mode;
+	uint8_t backlight_level;
+	int panel_mode;
+	struct amdgpu_afmt *afmt;
+};
+
+struct amdgpu_encoder {
+	struct drm_encoder base;
+	uint32_t encoder_enum;
+	uint32_t encoder_id;
+	uint32_t devices;
+	uint32_t active_device;
+	uint32_t flags;
+	uint32_t pixel_clock;
+	enum amdgpu_rmx_type rmx_type;
+	enum amdgpu_underscan_type underscan_type;
+	uint32_t underscan_hborder;
+	uint32_t underscan_vborder;
+	struct drm_display_mode native_mode;
+	void *enc_priv;
+	int audio_polling_active;
+	bool is_ext_encoder;
+	u16 caps;
+};
+
+struct amdgpu_connector_atom_dig {
+	/* displayport */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 dp_sink_type;
+	int dp_clock;
+	int dp_lane_count;
+	bool edp_on;
+};
+
+struct amdgpu_gpio_rec {
+	bool valid;
+	u8 id;
+	u32 reg;
+	u32 mask;
+	u32 shift;
+};
+
+struct amdgpu_hpd {
+	enum amdgpu_hpd_id hpd;
+	u8 plugged_state;
+	struct amdgpu_gpio_rec gpio;
+};
+
+struct amdgpu_router {
+	u32 router_id;
+	struct amdgpu_i2c_bus_rec i2c_info;
+	u8 i2c_addr;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
+};
+
+enum amdgpu_connector_audio {
+	AMDGPU_AUDIO_DISABLE = 0,
+	AMDGPU_AUDIO_ENABLE = 1,
+	AMDGPU_AUDIO_AUTO = 2
+};
+
+enum amdgpu_connector_dither {
+	AMDGPU_FMT_DITHER_DISABLE = 0,
+	AMDGPU_FMT_DITHER_ENABLE = 1,
+};
+
+struct amdgpu_connector {
+	struct drm_connector base;
+	uint32_t connector_id;
+	uint32_t devices;
+	struct amdgpu_i2c_chan *ddc_bus;
+	/* some systems have an hdmi and vga port with a shared ddc line */
+	bool shared_ddc;
+	bool use_digital;
+	/* we need to mind the EDID between detect
+	   and get modes due to analog/digital/tvencoder */
+	struct edid *edid;
+	void *con_priv;
+	bool dac_load_detect;
+	bool detected_by_load; /* if the connection status was determined by load */
+	uint16_t connector_object_id;
+	struct amdgpu_hpd hpd;
+	struct amdgpu_router router;
+	struct amdgpu_i2c_chan *router_bus;
+	enum amdgpu_connector_audio audio;
+	enum amdgpu_connector_dither dither;
+	unsigned pixelclock_for_modeset;
+};
+
+struct amdgpu_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+				((em) == ATOM_ENCODER_MODE_DP_MST))
+
+void amdgpu_link_encoder_connector(struct drm_device *dev);
+
+struct drm_connector *
+amdgpu_get_connector_for_encoder(struct drm_encoder *encoder);
+struct drm_connector *
+amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder);
+bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock);
+
+u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
+
+bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);
+
+void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
+
+int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      unsigned int flags,
+				      int *vpos, int *hpos, ktime_t *stime,
+				      ktime_t *etime);
+
+int amdgpu_framebuffer_init(struct drm_device *dev,
+			     struct amdgpu_framebuffer *rfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj);
+
+int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+
+void amdgpu_enc_destroy(struct drm_encoder *encoder);
+void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode);
+int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
+
+/* fbdev layer */
+int amdgpu_fbdev_init(struct amdgpu_device *adev);
+void amdgpu_fbdev_fini(struct amdgpu_device *adev);
+void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
+int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
+bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+
+void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
+
+
+int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
+
+/* amdgpu_display.c */
+void amdgpu_print_display_setup(struct drm_device *dev);
+int amdgpu_modeset_create_props(struct amdgpu_device *adev);
+int amdgpu_crtc_set_config(struct drm_mode_set *set);
+int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_pending_vblank_event *event,
+			  uint32_t page_flip_flags);
+extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
+
+#endif
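
The container_of wrappers at the top of this header are how code gets from the core DRM objects back to the amdgpu-specific state. A small, hypothetical illustration follows; the helper name and debug message are assumptions, the fields are those of struct amdgpu_crtc above.

static void example_log_crtc(struct drm_crtc *crtc)
{
	/* drm_crtc is embedded in amdgpu_crtc, so upcast via the helper */
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	DRM_DEBUG_KMS("crtc %d: pll id %u, adjusted clock %u\n",
		      amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
		      amdgpu_crtc->adjusted_clock);
}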

+ 671 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -0,0 +1,671 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
+
+static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
+						struct ttm_mem_reg *mem)
+{
+	u64 ret = 0;
+	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
+		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
+			   adev->mc.visible_vram_size ?
+			   adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+			   mem->size;
+	}
+	return ret;
+}
+
+static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
+		       struct ttm_mem_reg *old_mem,
+		       struct ttm_mem_reg *new_mem)
+{
+	u64 vis_size;
+	if (!adev)
+		return;
+
+	if (new_mem) {
+		switch (new_mem->mem_type) {
+		case TTM_PL_TT:
+			atomic64_add(new_mem->size, &adev->gtt_usage);
+			break;
+		case TTM_PL_VRAM:
+			atomic64_add(new_mem->size, &adev->vram_usage);
+			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
+			atomic64_add(vis_size, &adev->vram_vis_usage);
+			break;
+		}
+	}
+
+	if (old_mem) {
+		switch (old_mem->mem_type) {
+		case TTM_PL_TT:
+			atomic64_sub(old_mem->size, &adev->gtt_usage);
+			break;
+		case TTM_PL_VRAM:
+			atomic64_sub(old_mem->size, &adev->vram_usage);
+			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
+			atomic64_sub(vis_size, &adev->vram_vis_usage);
+			break;
+		}
+	}
+}
+
+static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct amdgpu_bo *bo;
+
+	bo = container_of(tbo, struct amdgpu_bo, tbo);
+
+	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
+	amdgpu_mn_unregister(bo);
+
+	mutex_lock(&bo->adev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->adev->gem.mutex);
+	drm_gem_object_release(&bo->gem_base);
+	kfree(bo->metadata);
+	kfree(bo);
+}
+
+bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &amdgpu_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
+static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
+				      struct ttm_placement *placement,
+				      struct ttm_place *placements,
+				      u32 domain, u64 flags)
+{
+	u32 c = 0, i;
+
+	placement->placement = placements;
+	placement->busy_placement = placements;
+
+	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
+			adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			placements[c].fpfn =
+				adev->mc.visible_vram_size >> PAGE_SHIFT;
+			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_VRAM;
+		}
+		placements[c].fpfn = 0;
+		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_VRAM;
+	}
+
+	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
+			placements[c].fpfn = 0;
+			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+				TTM_PL_FLAG_UNCACHED;
+		} else {
+			placements[c].fpfn = 0;
+			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		}
+	}
+
+	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
+		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
+			placements[c].fpfn = 0;
+			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+				TTM_PL_FLAG_UNCACHED;
+		} else {
+			placements[c].fpfn = 0;
+			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+		}
+	}
+
+	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
+		placements[c].fpfn = 0;
+		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+			AMDGPU_PL_FLAG_GDS;
+	}
+	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
+		placements[c].fpfn = 0;
+		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+			AMDGPU_PL_FLAG_GWS;
+	}
+	if (domain & AMDGPU_GEM_DOMAIN_OA) {
+		placements[c].fpfn = 0;
+		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+			AMDGPU_PL_FLAG_OA;
+	}
+
+	if (!c) {
+		placements[c].fpfn = 0;
+		placements[c++].flags = TTM_PL_MASK_CACHING |
+			TTM_PL_FLAG_SYSTEM;
+	}
+	placement->num_placement = c;
+	placement->num_busy_placement = c;
+
+	for (i = 0; i < c; i++) {
+		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+			(placements[i].flags & TTM_PL_FLAG_VRAM) &&
+			!placements[i].fpfn)
+			placements[i].lpfn =
+				adev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			placements[i].lpfn = 0;
+	}
+}
+
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+{
+	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
+				  rbo->placements, domain, rbo->flags);
+}
+
+static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
+					struct ttm_placement *placement)
+{
+	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
+
+	memcpy(bo->placements, placement->placement,
+	       placement->num_placement * sizeof(struct ttm_place));
+	bo->placement.num_placement = placement->num_placement;
+	bo->placement.num_busy_placement = placement->num_busy_placement;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+}
+
+int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
+				unsigned long size, int byte_align,
+				bool kernel, u32 domain, u64 flags,
+				struct sg_table *sg,
+				struct ttm_placement *placement,
+				struct amdgpu_bo **bo_ptr)
+{
+	struct amdgpu_bo *bo;
+	enum ttm_bo_type type;
+	unsigned long page_align;
+	size_t acc_size;
+	int r;
+
+	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
+	 * do this as a temporary workaround
+	 */
+	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
+		if (adev->asic_type >= CHIP_TOPAZ) {
+			if (byte_align & 0x7fff)
+				byte_align = ALIGN(byte_align, 0x8000);
+			if (size & 0x7fff)
+				size = ALIGN(size, 0x8000);
+		}
+	}
+
+	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (kernel) {
+		type = ttm_bo_type_kernel;
+	} else if (sg) {
+		type = ttm_bo_type_sg;
+	} else {
+		type = ttm_bo_type_device;
+	}
+	*bo_ptr = NULL;
+
+	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
+				       sizeof(struct amdgpu_bo));
+
+	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
+	bo->adev = adev;
+	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->va);
+	bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+				       AMDGPU_GEM_DOMAIN_GTT |
+				       AMDGPU_GEM_DOMAIN_CPU |
+				       AMDGPU_GEM_DOMAIN_GDS |
+				       AMDGPU_GEM_DOMAIN_GWS |
+				       AMDGPU_GEM_DOMAIN_OA);
+
+	bo->flags = flags;
+	amdgpu_fill_placement_to_bo(bo, placement);
+	/* Kernel allocations are uninterruptible */
+	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, page_align, !kernel, NULL,
+			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	*bo_ptr = bo;
+
+	trace_amdgpu_bo_create(bo);
+
+	return 0;
+}
+
+int amdgpu_bo_create(struct amdgpu_device *adev,
+		     unsigned long size, int byte_align,
+		     bool kernel, u32 domain, u64 flags,
+		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+{
+	struct ttm_placement placement = {0};
+	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+
+	memset(&placements, 0,
+	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+	amdgpu_ttm_placement_init(adev, &placement,
+				  placements, domain, flags);
+
+	return amdgpu_bo_create_restricted(adev, size, byte_align,
+					   kernel, domain, flags,
+					   sg,
+					   &placement,
+					   bo_ptr);
+}
+
+int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+		return -EPERM;
+
+	if (bo->kptr) {
+		if (ptr) {
+			*ptr = bo->kptr;
+		}
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r) {
+		return r;
+	}
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr) {
+		*ptr = bo->kptr;
+	}
+	return 0;
+}
+
+void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->kptr = NULL;
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
+{
+	if (bo == NULL)
+		return NULL;
+
+	ttm_bo_reference(&bo->tbo);
+	return bo;
+}
+
+void amdgpu_bo_unref(struct amdgpu_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+}
+
+int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+			     u64 min_offset, u64 max_offset,
+			     u64 *gpu_addr)
+{
+	int r, i;
+	unsigned fpfn, lpfn;
+
+	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+		return -EPERM;
+
+	if (WARN_ON_ONCE(min_offset > max_offset))
+		return -EINVAL;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = amdgpu_bo_gpu_offset(bo);
+
+		if (max_offset != 0) {
+			u64 domain_start;
+			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+				domain_start = bo->adev->mc.vram_start;
+			else
+				domain_start = bo->adev->mc.gtt_start;
+			WARN_ON_ONCE(max_offset <
+				     (amdgpu_bo_gpu_offset(bo) - domain_start));
+		}
+
+		return 0;
+	}
+	amdgpu_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		/* force to pin into visible video ram */
+		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
+		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+			if (WARN_ON_ONCE(min_offset >
+					 bo->adev->mc.visible_vram_size))
+				return -EINVAL;
+			fpfn = min_offset >> PAGE_SHIFT;
+			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
+		} else {
+			fpfn = min_offset >> PAGE_SHIFT;
+			lpfn = max_offset >> PAGE_SHIFT;
+		}
+		if (fpfn > bo->placements[i].fpfn)
+			bo->placements[i].fpfn = fpfn;
+		if (lpfn && lpfn < bo->placements[i].lpfn)
+			bo->placements[i].lpfn = lpfn;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+	}
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = amdgpu_bo_gpu_offset(bo);
+		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+		else
+			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+	} else {
+		dev_err(bo->adev->dev, "%p pin failed\n", bo);
+	}
+	return r;
+}
+
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+{
+	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+}
+
+int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+{
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+		else
+			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+	} else {
+		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+	}
+	return r;
+}
+
+int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
+{
+	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
+	if (0 && (adev->flags & AMDGPU_IS_APU)) {
+		/* Useless to evict on IGP chips */
+		return 0;
+	}
+	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+}
+
+void amdgpu_bo_force_delete(struct amdgpu_device *adev)
+{
+	struct amdgpu_bo *bo, *n;
+
+	if (list_empty(&adev->gem.objects)) {
+		return;
+	}
+	dev_err(adev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
+		mutex_lock(&adev->ddev->struct_mutex);
+		dev_err(adev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		mutex_lock(&bo->adev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->adev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
+		mutex_unlock(&adev->ddev->struct_mutex);
+	}
+}
+
+int amdgpu_bo_init(struct amdgpu_device *adev)
+{
+	/* Add an MTRR for the VRAM */
+	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
+					      adev->mc.aper_size);
+	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
+		adev->mc.mc_vram_size >> 20,
+		(unsigned long long)adev->mc.aper_size >> 20);
+	DRM_INFO("RAM width %dbits DDR\n",
+			adev->mc.vram_width);
+	return amdgpu_ttm_init(adev);
+}
+
+void amdgpu_bo_fini(struct amdgpu_device *adev)
+{
+	amdgpu_ttm_fini(adev);
+	arch_phys_wc_del(adev->mc.vram_mtrr);
+}
+
+int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+			     struct vm_area_struct *vma)
+{
+	return ttm_fbdev_mmap(vma, &bo->tbo);
+}
+
+int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
+{
+	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
+		return -EINVAL;
+
+	bo->tiling_flags = tiling_flags;
+	return 0;
+}
+
+void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
+{
+	lockdep_assert_held(&bo->tbo.resv->lock.base);
+
+	if (tiling_flags)
+		*tiling_flags = bo->tiling_flags;
+}
+
+int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+			    uint32_t metadata_size, uint64_t flags)
+{
+	void *buffer;
+
+	if (!metadata_size) {
+		if (bo->metadata_size) {
+			kfree(bo->metadata);
+			bo->metadata_size = 0;
+		}
+		return 0;
+	}
+
+	if (metadata == NULL)
+		return -EINVAL;
+
+	buffer = kzalloc(metadata_size, GFP_KERNEL);
+	if (buffer == NULL)
+		return -ENOMEM;
+
+	memcpy(buffer, metadata, metadata_size);
+
+	kfree(bo->metadata);
+	bo->metadata_flags = flags;
+	bo->metadata = buffer;
+	bo->metadata_size = metadata_size;
+
+	return 0;
+}
+
+int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+			   size_t buffer_size, uint32_t *metadata_size,
+			   uint64_t *flags)
+{
+	if (!buffer && !metadata_size)
+		return -EINVAL;
+
+	if (buffer) {
+		if (buffer_size < bo->metadata_size)
+			return -EINVAL;
+
+		if (bo->metadata_size)
+			memcpy(buffer, bo->metadata, bo->metadata_size);
+	}
+
+	if (metadata_size)
+		*metadata_size = bo->metadata_size;
+	if (flags)
+		*flags = bo->metadata_flags;
+
+	return 0;
+}
+
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *new_mem)
+{
+	struct amdgpu_bo *rbo;
+
+	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+		return;
+
+	rbo = container_of(bo, struct amdgpu_bo, tbo);
+	amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+
+	/* update statistics */
+	if (!new_mem)
+		return;
+
+	/* move_notify is called before move happens */
+	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+}
+
+int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct amdgpu_device *adev;
+	struct amdgpu_bo *abo;
+	unsigned long offset, size, lpfn;
+	int i, r;
+
+	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+		return 0;
+
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	adev = abo->adev;
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= adev->mc.visible_vram_size)
+		return 0;
+
+	/* hurrah the memory is not visible ! */
+	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
+	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
+	for (i = 0; i < abo->placement.num_placement; i++) {
+		/* Force into visible VRAM */
+		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+			abo->placements[i].lpfn = lpfn;
+	}
+	r = ttm_bo_validate(bo, &abo->placement, false, false);
+	if (unlikely(r == -ENOMEM)) {
+		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &abo->placement, false, false);
+	} else if (unlikely(r != 0)) {
+		return r;
+	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > adev->mc.visible_vram_size)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * amdgpu_bo_fence - add fence to buffer object
+ *
+ * @bo: buffer object in question
+ * @fence: fence to add
+ * @shared: true if fence should be added shared
+ *
+ */
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+		     bool shared)
+{
+	struct reservation_object *resv = bo->tbo.resv;
+
+	if (shared)
+		reservation_object_add_shared_fence(resv, &fence->base);
+	else
+		reservation_object_add_excl_fence(resv, &fence->base);
+}
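
Putting the helpers in this file together, a kernel-internal allocation typically follows a create/reserve/pin/kmap sequence. The sketch below is illustrative only; the wrapper name and error-path layout are assumptions, but every amdgpu_bo_* call matches the signatures defined in this file and in amdgpu_object.h.

static int example_alloc_vram_bo(struct amdgpu_device *adev,
				 struct amdgpu_bo **bo_out, u64 *gpu_addr)
{
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	int r;

	/* one page of CPU-visible VRAM for kernel use */
	r = amdgpu_bo_create(adev, PAGE_SIZE, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		goto err_unref;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, gpu_addr);
	if (r)
		goto err_unreserve;

	/* optional CPU mapping of the pinned buffer */
	r = amdgpu_bo_kmap(bo, &cpu_ptr);
	if (r)
		goto err_unpin;

	amdgpu_bo_unreserve(bo);
	*bo_out = bo;
	return 0;

err_unpin:
	amdgpu_bo_unpin(bo);
err_unreserve:
	amdgpu_bo_unreserve(bo);
err_unref:
	amdgpu_bo_unref(&bo);
	return r;
}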

+ 203 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -0,0 +1,203 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __AMDGPU_OBJECT_H__
+#define __AMDGPU_OBJECT_H__
+
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+/**
+ * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type:	ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
+{
+	switch (mem_type) {
+	case TTM_PL_VRAM:
+		return AMDGPU_GEM_DOMAIN_VRAM;
+	case TTM_PL_TT:
+		return AMDGPU_GEM_DOMAIN_GTT;
+	case TTM_PL_SYSTEM:
+		return AMDGPU_GEM_DOMAIN_CPU;
+	case AMDGPU_PL_GDS:
+		return AMDGPU_GEM_DOMAIN_GDS;
+	case AMDGPU_PL_GWS:
+		return AMDGPU_GEM_DOMAIN_GWS;
+	case AMDGPU_PL_OA:
+		return AMDGPU_GEM_DOMAIN_OA;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_bo_reserve - reserve bo
+ * @bo:		bo structure
+ * @no_intr:	don't return -ERESTARTSYS on pending signal
+ *
+ * Returns:
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
+
+static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo:	amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
+{
+	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+}
+
+static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
+{
+	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+}
+
+/**
+ * amdgpu_bo_mmap_offset - return mmap offset of bo
+ * @bo:	amdgpu object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ */
+static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
+{
+	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+}
+
+int amdgpu_bo_create(struct amdgpu_device *adev,
+			    unsigned long size, int byte_align,
+			    bool kernel, u32 domain, u64 flags,
+			    struct sg_table *sg,
+			    struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
+				unsigned long size, int byte_align,
+				bool kernel, u32 domain, u64 flags,
+				struct sg_table *sg,
+				struct ttm_placement *placement,
+				struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
+struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
+void amdgpu_bo_unref(struct amdgpu_bo **bo);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+			     u64 min_offset, u64 max_offset,
+			     u64 *gpu_addr);
+int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+void amdgpu_bo_force_delete(struct amdgpu_device *adev);
+int amdgpu_bo_init(struct amdgpu_device *adev);
+void amdgpu_bo_fini(struct amdgpu_device *adev);
+int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
+				struct vm_area_struct *vma);
+int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
+void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
+int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+			    uint32_t metadata_size, uint64_t flags);
+int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
+			   size_t buffer_size, uint32_t *metadata_size,
+			   uint64_t *flags);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *new_mem);
+int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+		     bool shared);
+
+/*
+ * sub allocation
+ */
+
+static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
+{
+	return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
+{
+	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
+int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
+				     struct amdgpu_sa_manager *sa_manager,
+				     unsigned size, u32 align, u32 domain);
+void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
+				      struct amdgpu_sa_manager *sa_manager);
+int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
+				      struct amdgpu_sa_manager *sa_manager);
+int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
+					struct amdgpu_sa_manager *sa_manager);
+int amdgpu_sa_bo_new(struct amdgpu_device *adev,
+			    struct amdgpu_sa_manager *sa_manager,
+			    struct amdgpu_sa_bo **sa_bo,
+			    unsigned size, unsigned align);
+void amdgpu_sa_bo_free(struct amdgpu_device *adev,
+			      struct amdgpu_sa_bo **sa_bo,
+			      struct amdgpu_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
+					 struct seq_file *m);
+#endif
+
+
+#endif
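
The sub-allocation interface at the end of this header carves small, short-lived ranges out of one backing BO. A brief, hypothetical sketch follows; the wrapper name, the manager argument and the NULL fence are assumptions made only to keep the example self-contained.

static int example_sa_alloc(struct amdgpu_device *adev,
			    struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo;
	int r;

	/* carve a 256-byte, 16-byte aligned chunk out of the manager */
	r = amdgpu_sa_bo_new(adev, sa_manager, &sa_bo, 256, 16);
	if (r)
		return r;

	DRM_DEBUG("sub-alloc at GPU 0x%010llx, CPU %p\n",
		  amdgpu_sa_bo_gpu_addr(sa_bo),
		  amdgpu_sa_bo_cpu_addr(sa_bo));

	/* real users pass the fence of the last submission touching the
	 * range; NULL is assumed here only to keep the sketch short
	 */
	amdgpu_sa_bo_free(adev, &sa_bo, NULL);
	return 0;
}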

+ 350 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c

@@ -0,0 +1,350 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "atom.h"
+#include "atombios_encoders.h"
+#include <asm/div64.h>
+#include <linux/gcd.h>
+
+/**
+ * amdgpu_pll_reduce_ratio - fractional number reduction
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @nom_min: minimum value for the numerator
+ * @den_min: minimum value for the denominator
+ *
+ * Find the greatest common divisor and divide both numerator and
+ * denominator by it, but make sure the numerator and denominator stay
+ * at least as large as their minimum values.
+ */
+static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
+				    unsigned nom_min, unsigned den_min)
+{
+	unsigned tmp;
+
+	/* reduce the numbers to a simpler ratio */
+	tmp = gcd(*nom, *den);
+	*nom /= tmp;
+	*den /= tmp;
+
+	/* make sure the numerator is large enough */
+	if (*nom < nom_min) {
+		tmp = DIV_ROUND_UP(nom_min, *nom);
+		*nom *= tmp;
+		*den *= tmp;
+	}
+
+	/* make sure the denominator is large enough */
+	if (*den < den_min) {
+		tmp = DIV_ROUND_UP(den_min, *den);
+		*nom *= tmp;
+		*den *= tmp;
+	}
+}
+
+/**
+ * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @post_div: post divider
+ * @fb_div_max: feedback divider maximum
+ * @ref_div_max: reference divider maximum
+ * @fb_div: resulting feedback divider
+ * @ref_div: resulting reference divider
+ *
+ * Calculate feedback and reference divider for a given post divider. Makes
+ * sure we stay within the limits.
+ */
+static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
+				      unsigned fb_div_max, unsigned ref_div_max,
+				      unsigned *fb_div, unsigned *ref_div)
+{
+	/* limit reference * post divider to a maximum */
+	ref_div_max = min(128 / post_div, ref_div_max);
+
+	/* get matching reference and feedback divider */
+	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+
+	/* limit fb divider to its maximum */
+	if (*fb_div > fb_div_max) {
+		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+		*fb_div = fb_div_max;
+	}
+}
+
+/**
+ * amdgpu_pll_compute - compute PLL parameters
+ *
+ * @pll: information about the PLL
+ * @freq: requested frequency
+ * @dot_clock_p: resulting pixel clock
+ * @fb_div_p: resulting feedback divider
+ * @frac_fb_div_p: fractional part of the feedback divider
+ * @ref_div_p: resulting reference divider
+ * @post_div_p: resulting post divider
+ *
+ * Try to calculate the PLL parameters to generate the given frequency:
+ * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
+ */
+void amdgpu_pll_compute(struct amdgpu_pll *pll,
+			u32 freq,
+			u32 *dot_clock_p,
+			u32 *fb_div_p,
+			u32 *frac_fb_div_p,
+			u32 *ref_div_p,
+			u32 *post_div_p)
+{
+	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
+		freq : freq / 10;
+
+	unsigned fb_div_min, fb_div_max, fb_div;
+	unsigned post_div_min, post_div_max, post_div;
+	unsigned ref_div_min, ref_div_max, ref_div;
+	unsigned post_div_best, diff_best;
+	unsigned nom, den;
+
+	/* determine allowed feedback divider range */
+	fb_div_min = pll->min_feedback_div;
+	fb_div_max = pll->max_feedback_div;
+
+	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
+		fb_div_min *= 10;
+		fb_div_max *= 10;
+	}
+
+	/* determine allowed ref divider range */
+	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
+		ref_div_min = pll->reference_div;
+	else
+		ref_div_min = pll->min_ref_div;
+
+	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
+	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
+		ref_div_max = pll->reference_div;
+	else
+		ref_div_max = pll->max_ref_div;
+
+	/* determine allowed post divider range */
+	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
+		post_div_min = pll->post_div;
+		post_div_max = pll->post_div;
+	} else {
+		unsigned vco_min, vco_max;
+
+		if (pll->flags & AMDGPU_PLL_IS_LCD) {
+			vco_min = pll->lcd_pll_out_min;
+			vco_max = pll->lcd_pll_out_max;
+		} else {
+			vco_min = pll->pll_out_min;
+			vco_max = pll->pll_out_max;
+		}
+
+		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
+			vco_min *= 10;
+			vco_max *= 10;
+		}
+
+		post_div_min = vco_min / target_clock;
+		if ((target_clock * post_div_min) < vco_min)
+			++post_div_min;
+		if (post_div_min < pll->min_post_div)
+			post_div_min = pll->min_post_div;
+
+		post_div_max = vco_max / target_clock;
+		if ((target_clock * post_div_max) > vco_max)
+			--post_div_max;
+		if (post_div_max > pll->max_post_div)
+			post_div_max = pll->max_post_div;
+	}
+
+	/* represent the searched ratio as fractional number */
+	nom = target_clock;
+	den = pll->reference_freq;
+
+	/* reduce the numbers to a simpler ratio */
+	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
+
+	/* now search for a post divider */
+	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
+		post_div_best = post_div_min;
+	else
+		post_div_best = post_div_max;
+	diff_best = ~0;
+
+	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
+		unsigned diff;
+		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
+					  ref_div_max, &fb_div, &ref_div);
+		diff = abs(target_clock - (pll->reference_freq * fb_div) /
+			(ref_div * post_div));
+
+		if (diff < diff_best || (diff == diff_best &&
+		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {
+
+			post_div_best = post_div;
+			diff_best = diff;
+		}
+	}
+	post_div = post_div_best;
+
+	/* get the feedback and reference divider for the optimal value */
+	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+				  &fb_div, &ref_div);
+
+	/* reduce the numbers to a simpler ratio once more */
+	/* this also makes sure that the reference divider is large enough */
+	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
+
+	/* avoid high jitter with small fractional dividers */
+	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+		if (fb_div < fb_div_min) {
+			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+			fb_div *= tmp;
+			ref_div *= tmp;
+		}
+	}
+
+	/* and finally save the result */
+	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
+		*fb_div_p = fb_div / 10;
+		*frac_fb_div_p = fb_div % 10;
+	} else {
+		*fb_div_p = fb_div;
+		*frac_fb_div_p = 0;
+	}
+
+	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
+			(pll->reference_freq * *frac_fb_div_p)) /
+		       (ref_div * post_div * 10);
+	*ref_div_p = ref_div;
+	*post_div_p = post_div;
+
+	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
+		      ref_div, post_div);
+}
+
+/**
+ * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+ */
+u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct amdgpu_crtc *test_amdgpu_crtc;
+	u32 pll_in_use = 0;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+
+		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
+		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
+			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
+	}
+	return pll_in_use;
+}
+
+/**
+ * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct amdgpu_crtc *test_amdgpu_crtc;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
+		if (test_amdgpu_crtc->encoder &&
+		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
+			/* for DP use the same PLL for all */
+			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
+				return test_amdgpu_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct amdgpu_crtc *test_amdgpu_crtc;
+	u32 adjusted_clock, test_adjusted_clock;
+
+	adjusted_clock = amdgpu_crtc->adjusted_clock;
+
+	if (adjusted_clock == 0)
+		return ATOM_PPLL_INVALID;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
+		if (test_amdgpu_crtc->encoder &&
+		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
+			/* check if we are already driving this connector with another crtc */
+			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
+				/* if we are, return that pll */
+				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
+					return test_amdgpu_crtc->pll_id;
+			}
+			/* for non-DP check the clock */
+			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
+			if ((crtc->mode.clock == test_crtc->mode.clock) &&
+			    (adjusted_clock == test_adjusted_clock) &&
+			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
+			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
+				return test_amdgpu_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
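
To make the divider relation used by amdgpu_pll_compute() concrete, here is one worked instance of the documented formula; the numbers are purely illustrative and not tied to any ASIC. With reference_freq = 2700, feedback_div = 120, ref_div = 3 and post_div = 4:

	dot_clock = (2700 * 120) / (3 * 4) = 324000 / 12 = 27000

so the feedback divider multiplies the reference clock up to the VCO frequency, and the reference and post dividers scale it back down to the requested pixel clock.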

+ 38 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h

@@ -0,0 +1,38 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_PLL_H__
+#define __AMDGPU_PLL_H__
+
+void amdgpu_pll_compute(struct amdgpu_pll *pll,
+			 u32 freq,
+			 u32 *dot_clock_p,
+			 u32 *fb_div_p,
+			 u32 *frac_fb_div_p,
+			 u32 *ref_div_p,
+			 u32 *post_div_p);
+u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc);
+int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc);
+int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc);
+
+#endif

+ 799 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -0,0 +1,799 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5@gmail.com>
+ *          Alex Deucher <alexdeucher@gmail.com>
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_drv.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "atom.h"
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
+{
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		if (power_supply_is_system_supplied() > 0)
+			adev->pm.dpm.ac_power = true;
+		else
+			adev->pm.dpm.ac_power = false;
+		if (adev->pm.funcs->enable_bapm)
+			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
+		mutex_unlock(&adev->pm.mutex);
+	}
+}
+
+static ssize_t amdgpu_get_dpm_state(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+}
+
+static ssize_t amdgpu_set_dpm_state(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+
+	mutex_lock(&adev->pm.mutex);
+	if (strncmp("battery", buf, strlen("battery")) == 0)
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
+	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	else if (strncmp("performance", buf, strlen("performance")) == 0)
+		adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
+	else {
+		mutex_unlock(&adev->pm.mutex);
+		count = -EINVAL;
+		goto fail;
+	}
+	mutex_unlock(&adev->pm.mutex);
+
+	/* Can't set dpm state when the card is off */
+	if (!(adev->flags & AMDGPU_IS_PX) ||
+	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+		amdgpu_pm_compute_clocks(adev);
+fail:
+	return count;
+}
+
+static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
+						       struct device_attribute *attr,
+						       char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+			(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
+}
+
+static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
+						       struct device_attribute *attr,
+						       const char *buf,
+						       size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	enum amdgpu_dpm_forced_level level;
+	int ret = 0;
+
+	mutex_lock(&adev->pm.mutex);
+	if (strncmp("low", buf, strlen("low")) == 0) {
+		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
+	} else if (strncmp("high", buf, strlen("high")) == 0) {
+		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
+	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
+		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+	} else {
+		count = -EINVAL;
+		goto fail;
+	}
+	if (adev->pm.funcs->force_performance_level) {
+		if (adev->pm.dpm.thermal_active) {
+			count = -EINVAL;
+			goto fail;
+		}
+		ret = amdgpu_dpm_force_performance_level(adev, level);
+		if (ret)
+			count = -EINVAL;
+	}
+fail:
+	mutex_unlock(&adev->pm.mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
+static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
+		   amdgpu_get_dpm_forced_performance_level,
+		   amdgpu_set_dpm_forced_performance_level);
+
+static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int temp;
+
+	if (adev->pm.funcs->get_temperature)
+		temp = amdgpu_dpm_get_temperature(adev);
+	else
+		temp = 0;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int hyst = to_sensor_dev_attr(attr)->index;
+	int temp;
+
+	if (hyst)
+		temp = adev->pm.dpm.thermal.min_temp;
+	else
+		temp = adev->pm.dpm.thermal.max_temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 pwm_mode = 0;
+
+	if (adev->pm.funcs->get_fan_control_mode)
+		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
+	/* never 0 (full speed); the fan is always fuse- or smc-controlled */
+	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
+}
+
+static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t count)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	int value;
+
+	if(!adev->pm.funcs->set_fan_control_mode)
+		return -EINVAL;
+
+	err = kstrtoint(buf, 10, &value);
+	if (err)
+		return err;
+
+	switch (value) {
+	case 1: /* manual, percent-based */
+		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
+		break;
+	default: /* disable */
+		amdgpu_dpm_set_fan_control_mode(adev, 0);
+		break;
+	}
+
+	return count;
+}
+
+static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "%i\n", 0);
+}
+
+static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "%i\n", 255);
+}
+
+static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	u32 value;
+
+	err = kstrtou32(buf, 10, &value);
+	if (err)
+		return err;
+
+	value = (value * 100) / 255;
+
+	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+	if (err)
+		return err;
+
+	return count;
+}
+
+static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	u32 speed;
+
+	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+	if (err)
+		return err;
+
+	speed = (speed * 255) / 100;
+
+	return sprintf(buf, "%i\n", speed);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_max.dev_attr.attr,
+	NULL
+};
+
+static umode_t hwmon_attributes_visible(struct kobject *kobj,
+					struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	umode_t effective_mode = attr->mode;
+
+	/* Skip limit attributes if DPM is not enabled */
+	if (!adev->pm.dpm_enabled &&
+	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+		return 0;
+
+	/* Skip fan attributes if fan is not present */
+	if (adev->pm.no_fan &&
+	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+		return 0;
+
+	/* mask fan attributes if we have no bindings for this asic to expose */
+	if ((!adev->pm.funcs->get_fan_speed_percent &&
+	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+	    (!adev->pm.funcs->get_fan_control_mode &&
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+		effective_mode &= ~S_IRUGO;
+
+	if ((!adev->pm.funcs->set_fan_speed_percent &&
+	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+	    (!adev->pm.funcs->set_fan_control_mode &&
+	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+		effective_mode &= ~S_IWUSR;
+
+	/* hide max/min values if we can't both query and manage the fan */
+	if ((!adev->pm.funcs->set_fan_speed_percent &&
+	     !adev->pm.funcs->get_fan_speed_percent) &&
+	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+		return 0;
+
+	return effective_mode;
+}
+
+static const struct attribute_group hwmon_attrgroup = {
+	.attrs = hwmon_attributes,
+	.is_visible = hwmon_attributes_visible,
+};
+
+static const struct attribute_group *hwmon_groups[] = {
+	&hwmon_attrgroup,
+	NULL
+};
+
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device,
+			     pm.dpm.thermal.work);
+	/* switch to the thermal state */
+	enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+
+	if (!adev->pm.dpm_enabled)
+		return;
+
+	if (adev->pm.funcs->get_temperature) {
+		int temp = amdgpu_dpm_get_temperature(adev);
+
+		if (temp < adev->pm.dpm.thermal.min_temp)
+			/* switch back the user state */
+			dpm_state = adev->pm.dpm.user_state;
+	} else {
+		if (adev->pm.dpm.thermal.high_to_low)
+			/* switch back the user state */
+			dpm_state = adev->pm.dpm.user_state;
+	}
+	mutex_lock(&adev->pm.mutex);
+	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+		adev->pm.dpm.thermal_active = true;
+	else
+		adev->pm.dpm.thermal_active = false;
+	adev->pm.dpm.state = dpm_state;
+	mutex_unlock(&adev->pm.mutex);
+
+	amdgpu_pm_compute_clocks(adev);
+}
+
+static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
+						     enum amdgpu_pm_state_type dpm_state)
+{
+	int i;
+	struct amdgpu_ps *ps;
+	u32 ui_class;
+	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
+		true : false;
+
+	/* check if the vblank period is too short to adjust the mclk */
+	if (single_display && adev->pm.funcs->vblank_too_short) {
+		if (amdgpu_dpm_vblank_too_short(adev))
+			single_display = false;
+	}
+
+	/* certain older asics have a separate 3D performance state,
+	 * so try that first if the user selected performance
+	 */
+	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+	/* balanced states don't exist at the moment */
+	if (dpm_state == POWER_STATE_TYPE_BALANCED)
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+	/* Pick the best power state based on current conditions */
+	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+		ps = &adev->pm.dpm.ps[i];
+		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+		switch (dpm_state) {
+		/* user states */
+		case POWER_STATE_TYPE_BATTERY:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_BALANCED:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_PERFORMANCE:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		/* internal states */
+		case POWER_STATE_TYPE_INTERNAL_UVD:
+			if (adev->pm.dpm.uvd_ps)
+				return adev->pm.dpm.uvd_ps;
+			else
+				break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_BOOT:
+			return adev->pm.dpm.boot_ps;
+		case POWER_STATE_TYPE_INTERNAL_THERMAL:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ACPI:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ULV:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_3DPERF:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+				return ps;
+			break;
+		default:
+			break;
+		}
+	}
+	/* use a fallback state if we didn't match */
+	switch (dpm_state) {
+	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+		if (adev->pm.dpm.uvd_ps) {
+			return adev->pm.dpm.uvd_ps;
+		} else {
+			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+			goto restart_search;
+		}
+	case POWER_STATE_TYPE_INTERNAL_THERMAL:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_ACPI:
+		dpm_state = POWER_STATE_TYPE_BATTERY;
+		goto restart_search;
+	case POWER_STATE_TYPE_BATTERY:
+	case POWER_STATE_TYPE_BALANCED:
+	case POWER_STATE_TYPE_INTERNAL_3DPERF:
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+		goto restart_search;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+	int i;
+	struct amdgpu_ps *ps;
+	enum amdgpu_pm_state_type dpm_state;
+	int ret;
+
+	/* if dpm init failed */
+	if (!adev->pm.dpm_enabled)
+		return;
+
+	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+		/* add other state override checks here */
+		if ((!adev->pm.dpm.thermal_active) &&
+		    (!adev->pm.dpm.uvd_active))
+			adev->pm.dpm.state = adev->pm.dpm.user_state;
+	}
+	dpm_state = adev->pm.dpm.state;
+
+	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+	if (ps)
+		adev->pm.dpm.requested_ps = ps;
+	else
+		return;
+
+	/* no need to reprogram if nothing changed unless we are on BTC+ */
+	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
+		/* vce just modifies an existing state so force a change */
+		if (ps->vce_active != adev->pm.dpm.vce_active)
+			goto force;
+		if (adev->flags & AMDGPU_IS_APU) {
+			/* for APUs if the num crtcs changed but state is the same,
+			 * all we need to do is update the display configuration.
+			 */
+			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
+				/* update display watermarks based on new power state */
+				amdgpu_display_bandwidth_update(adev);
+				/* update displays */
+				amdgpu_dpm_display_configuration_changed(adev);
+				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+			}
+			return;
+		} else {
+			/* for BTC+ if the num crtcs hasn't changed and state is the same,
+			 * nothing to do, if the num crtcs is > 1 and state is the same,
+			 * update display configuration.
+			 */
+			if (adev->pm.dpm.new_active_crtcs ==
+			    adev->pm.dpm.current_active_crtcs) {
+				return;
+			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
+				   (adev->pm.dpm.new_active_crtc_count > 1)) {
+				/* update display watermarks based on new power state */
+				amdgpu_display_bandwidth_update(adev);
+				/* update displays */
+				amdgpu_dpm_display_configuration_changed(adev);
+				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+				return;
+			}
+		}
+	}
+
+force:
+	if (amdgpu_dpm == 1) {
+		printk("switching from power state:\n");
+		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+		printk("switching to power state:\n");
+		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+	}
+
+	mutex_lock(&adev->ddev->struct_mutex);
+	mutex_lock(&adev->ring_lock);
+
+	/* update whether vce is active */
+	ps->vce_active = adev->pm.dpm.vce_active;
+
+	ret = amdgpu_dpm_pre_set_power_state(adev);
+	if (ret)
+		goto done;
+
+	/* update display watermarks based on new power state */
+	amdgpu_display_bandwidth_update(adev);
+	/* update displays */
+	amdgpu_dpm_display_configuration_changed(adev);
+
+	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+	/* wait for the rings to drain */
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (ring && ring->ready)
+			amdgpu_fence_wait_empty(ring);
+	}
+
+	/* program the new power state */
+	amdgpu_dpm_set_power_state(adev);
+
+	/* update current power state */
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;
+
+	amdgpu_dpm_post_set_power_state(adev);
+
+	if (adev->pm.funcs->force_performance_level) {
+		if (adev->pm.dpm.thermal_active) {
+			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
+			/* force low perf level for thermal */
+			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
+			/* save the user's level */
+			adev->pm.dpm.forced_level = level;
+		} else {
+			/* otherwise, user selected level */
+			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
+		}
+	}
+
+done:
+	mutex_unlock(&adev->ring_lock);
+	mutex_unlock(&adev->ddev->struct_mutex);
+}
+
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+{
+	if (adev->pm.funcs->powergate_uvd) {
+		mutex_lock(&adev->pm.mutex);
+		/* enable/disable UVD */
+		amdgpu_dpm_powergate_uvd(adev, !enable);
+		mutex_unlock(&adev->pm.mutex);
+	} else {
+		if (enable) {
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.uvd_active = true;
+			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+			mutex_unlock(&adev->pm.mutex);
+		} else {
+			mutex_lock(&adev->pm.mutex);
+			adev->pm.dpm.uvd_active = false;
+			mutex_unlock(&adev->pm.mutex);
+		}
+
+		amdgpu_pm_compute_clocks(adev);
+	}
+}
+
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+{
+	if (enable) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm.vce_active = true;
+		/* XXX select vce level based on ring/task */
+		adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
+		mutex_unlock(&adev->pm.mutex);
+	} else {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm.vce_active = false;
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	amdgpu_pm_compute_clocks(adev);
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+		printk("== power state %d ==\n", i);
+		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+	}
+}
+
+int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
+{
+	int ret;
+
+	if (adev->pm.funcs->get_temperature == NULL)
+		return 0;
+	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
+								   DRIVER_NAME, adev,
+								   hwmon_groups);
+	if (IS_ERR(adev->pm.int_hwmon_dev)) {
+		ret = PTR_ERR(adev->pm.int_hwmon_dev);
+		dev_err(adev->dev,
+			"Unable to register hwmon device: %d\n", ret);
+		return ret;
+	}
+
+	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
+	if (ret) {
+		DRM_ERROR("failed to create device file for dpm state\n");
+		return ret;
+	}
+	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
+	if (ret) {
+		DRM_ERROR("failed to create device file for dpm state\n");
+		return ret;
+	}
+	ret = amdgpu_debugfs_pm_init(adev);
+	if (ret) {
+		DRM_ERROR("Failed to register debugfs file for dpm!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
+{
+	if (adev->pm.int_hwmon_dev)
+		hwmon_device_unregister(adev->pm.int_hwmon_dev);
+	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
+	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
+}
+
+void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+{
+	struct drm_device *ddev = adev->ddev;
+	struct drm_crtc *crtc;
+	struct amdgpu_crtc *amdgpu_crtc;
+
+	if (!adev->pm.dpm_enabled)
+		return;
+
+	mutex_lock(&adev->pm.mutex);
+
+	/* update active crtc counts */
+	adev->pm.dpm.new_active_crtcs = 0;
+	adev->pm.dpm.new_active_crtc_count = 0;
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				    &ddev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (crtc->enabled) {
+				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+				adev->pm.dpm.new_active_crtc_count++;
+			}
+		}
+	}
+
+	/* update battery/ac status */
+	if (power_supply_is_system_supplied() > 0)
+		adev->pm.dpm.ac_power = true;
+	else
+		adev->pm.dpm.ac_power = false;
+
+	amdgpu_dpm_change_power_state_locked(adev);
+
+	mutex_unlock(&adev->pm.mutex);
+
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		if (adev->pm.funcs->debugfs_print_current_performance_level)
+			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+		else
+			seq_printf(m, "Debugfs support not implemented for this asic\n");
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return 0;
+}
+
+static struct drm_info_list amdgpu_pm_info_list[] = {
+	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
+#else
+	return 0;
+#endif
+}

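Illustrative note (not part of the patch): the hwmon pwm1 interface above exposes fan speed as 0-255 in sysfs while the dpm backend works in percent. A small user-space sketch (an assumption, not driver code) of the store/show conversions shows that the integer truncation makes the round trip slightly lossy:

#include <stdio.h>

int main(void)
{
	unsigned v;

	for (v = 0; v <= 250; v += 50) {
		unsigned percent = (v * 100) / 255;        /* store path: sysfs 0-255 -> percent */
		unsigned readback = (percent * 255) / 100; /* show path: percent -> sysfs 0-255 */

		printf("write %3u -> %3u%% -> read back %3u\n", v, percent, readback);
	}
	return 0;
}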
+ 35 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h

@@ -0,0 +1,35 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_PM_H__
+#define __AMDGPU_PM_H__
+
+int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+
+#endif

+ 125 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c

@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+
+#include "amdgpu.h"
+#include <drm/amdgpu_drm.h>
+#include <linux/dma-buf.h>
+
+struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int npages = bo->tbo.num_pages;
+
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+}
+
+void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int ret;
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return bo->dma_buf_vmap.virtual;
+}
+
+void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	ttm_bo_kunmap(&bo->dma_buf_vmap);
+}
+
+struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+							struct dma_buf_attachment *attach,
+							struct sg_table *sg)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_bo *bo;
+	int ret;
+
+	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+	if (ret)
+		return ERR_PTR(ret);
+
+	mutex_lock(&adev->gem.mutex);
+	list_add_tail(&bo->list, &adev->gem.objects);
+	mutex_unlock(&adev->gem.mutex);
+
+	return &bo->gem_base;
+}
+
+int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int ret = 0;
+
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	amdgpu_bo_unreserve(bo);
+	return ret;
+}
+
+void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int ret = 0;
+
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return;
+
+	amdgpu_bo_unpin(bo);
+	amdgpu_bo_unreserve(bo);
+}
+
+struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	return bo->tbo.resv;
+}
+
+struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *gobj,
+					int flags)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+
+	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+		return ERR_PTR(-EPERM);
+
+	return drm_gem_prime_export(dev, gobj, flags);
+}

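Illustrative note (not part of the patch): the export hook above refuses userptr BOs with -EPERM. A hypothetical user-space sketch of how that path is reached, assuming libdrm's drmPrimeHandleToFD() from xf86drm.h:

#include <stdio.h>
#include <stdint.h>
#include <xf86drm.h>

/* turn a GEM handle into a dma-buf fd; userptr BOs are rejected by
 * amdgpu_gem_prime_export(), so this returns a negative errno for them */
static int export_bo(int drm_fd, uint32_t gem_handle)
{
	int prime_fd = -1;
	int ret = drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd);

	if (ret) {
		fprintf(stderr, "export failed: %d\n", ret);
		return ret;
	}
	return prime_fd;
}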
+ 561 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -0,0 +1,561 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "atom.h"
+
+/*
+ * Rings
+ * Most engines on the GPU are fed via ring buffers.  Ring
+ * buffers are areas of GPU accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer)
+ * which determines where the host has written.  When the
+ * pointers are equal, the ring is idle.  When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr.  The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
+ */
+static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+
+/**
+ * amdgpu_ring_free_size - update the free size
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
+void amdgpu_ring_free_size(struct amdgpu_ring *ring)
+{
+	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+
+	/* This works because ring_size is a power of 2 */
+	ring->ring_free_dw = rptr + (ring->ring_size / 4);
+	ring->ring_free_dw -= ring->wptr;
+	ring->ring_free_dw &= ring->ptr_mask;
+	if (!ring->ring_free_dw) {
+		/* this is an empty ring */
+		ring->ring_free_dw = ring->ring_size / 4;
+		/* update lockup info to avoid a false positive */
+		amdgpu_ring_lockup_update(ring);
+	}
+}
+
+/**
+ * amdgpu_ring_alloc - allocate space on the ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
+{
+	int r;
+
+	/* make sure we aren't trying to allocate more space than there is on the ring */
+	if (ndw > (ring->ring_size / 4))
+		return -ENOMEM;
+	/* Align requested size with padding so unlock_commit can
+	 * pad safely */
+	amdgpu_ring_free_size(ring);
+	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	while (ndw > (ring->ring_free_dw - 1)) {
+		amdgpu_ring_free_size(ring);
+		if (ndw < ring->ring_free_dw) {
+			break;
+		}
+		r = amdgpu_fence_wait_next(ring);
+		if (r)
+			return r;
+	}
+	ring->count_dw = ndw;
+	ring->wptr_old = ring->wptr;
+	return 0;
+}
+
+/**
+ * amdgpu_ring_lock - lock the ring and allocate space on it
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(ring->ring_lock);
+	r = amdgpu_ring_alloc(ring, ndw);
+	if (r) {
+		mutex_unlock(ring->ring_lock);
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_ring_commit - tell the GPU to execute the new
+ * commands on the ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
+void amdgpu_ring_commit(struct amdgpu_ring *ring)
+{
+	/* We pad to match fetch size */
+	while (ring->wptr & ring->align_mask) {
+		amdgpu_ring_write(ring, ring->nop);
+	}
+	mb();
+	amdgpu_ring_set_wptr(ring);
+}
+
+/**
+ * amdgpu_ring_unlock_commit - tell the GPU to execute the new
+ * commands on the ring buffer and unlock it
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Call amdgpu_ring_commit() then unlock the ring (all asics).
+ */
+void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_commit(ring);
+	mutex_unlock(ring->ring_lock);
+}
+
+/**
+ * amdgpu_ring_undo - reset the wptr
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
+void amdgpu_ring_undo(struct amdgpu_ring *ring)
+{
+	ring->wptr = ring->wptr_old;
+}
+
+/**
+ * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Call amdgpu_ring_undo() then unlock the ring (all asics).
+ */
+void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_undo(ring);
+	mutex_unlock(ring->ring_lock);
+}
+
+/**
+ * amdgpu_ring_lockup_update - update lockup variables
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
+void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
+{
+	atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
+	atomic64_set(&ring->last_activity, jiffies_64);
+}
+
+/**
+ * amdgpu_ring_test_lockup() - check if the ring is locked up based on recorded activity
+ * @ring:       amdgpu_ring structure holding ring information
+ *
+ */
+bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
+{
+	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint64_t last = atomic64_read(&ring->last_activity);
+	uint64_t elapsed;
+
+	if (rptr != atomic_read(&ring->last_rptr)) {
+		/* ring is still working, no lockup */
+		amdgpu_ring_lockup_update(ring);
+		return false;
+	}
+
+	elapsed = jiffies_to_msecs(jiffies_64 - last);
+	if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
+		dev_err(ring->adev->dev, "ring %d stalled for more than %llumsec\n",
+			ring->idx, elapsed);
+		return true;
+	}
+	/* give a chance to the GPU ... */
+	return false;
+}
+
+/**
+ * amdgpu_ring_backup - Back up the content of a ring
+ *
+ * @ring: the ring we want to back up
+ *
+ * Saves all unprocessed commits from a ring, returns the number of dwords saved.
+ */
+unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
+			    uint32_t **data)
+{
+	unsigned size, ptr, i;
+
+	/* just in case lock the ring */
+	mutex_lock(ring->ring_lock);
+	*data = NULL;
+
+	if (ring->ring_obj == NULL) {
+		mutex_unlock(ring->ring_lock);
+		return 0;
+	}
+
+	/* it doesn't make sense to save anything if all fences are signaled */
+	if (!amdgpu_fence_count_emitted(ring)) {
+		mutex_unlock(ring->ring_lock);
+		return 0;
+	}
+
+	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+
+	size = ring->wptr + (ring->ring_size / 4);
+	size -= ptr;
+	size &= ring->ptr_mask;
+	if (size == 0) {
+		mutex_unlock(ring->ring_lock);
+		return 0;
+	}
+
+	/* and then save the content of the ring */
+	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+	if (!*data) {
+		mutex_unlock(ring->ring_lock);
+		return 0;
+	}
+	for (i = 0; i < size; ++i) {
+		(*data)[i] = ring->ring[ptr++];
+		ptr &= ring->ptr_mask;
+	}
+
+	mutex_unlock(ring->ring_lock);
+	return size;
+}
+
+/**
+ * amdgpu_ring_restore - append saved commands to the ring again
+ *
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restore the previously saved commands.
+ */
+int amdgpu_ring_restore(struct amdgpu_ring *ring,
+			unsigned size, uint32_t *data)
+{
+	int i, r;
+
+	if (!size || !data)
+		return 0;
+
+	/* restore the saved ring content */
+	r = amdgpu_ring_lock(ring, size);
+	if (r)
+		return r;
+
+	for (i = 0; i < size; ++i) {
+		amdgpu_ring_write(ring, data[i]);
+	}
+
+	amdgpu_ring_unlock_commit(ring);
+	kfree(data);
+	return 0;
+}
+
+/**
+ * amdgpu_ring_init - init driver ring struct.
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+		     unsigned ring_size, u32 nop, u32 align_mask,
+		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
+		     enum amdgpu_ring_type ring_type)
+{
+	u32 rb_bufsz;
+	int r;
+
+	if (ring->adev == NULL) {
+		if (adev->num_rings >= AMDGPU_MAX_RINGS)
+			return -EINVAL;
+
+		ring->adev = adev;
+		ring->idx = adev->num_rings++;
+		adev->rings[ring->idx] = ring;
+		amdgpu_fence_driver_init_ring(ring);
+	}
+
+	r = amdgpu_wb_get(adev, &ring->rptr_offs);
+	if (r) {
+		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+		return r;
+	}
+
+	r = amdgpu_wb_get(adev, &ring->wptr_offs);
+	if (r) {
+		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+		return r;
+	}
+
+	r = amdgpu_wb_get(adev, &ring->fence_offs);
+	if (r) {
+		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+		return r;
+	}
+
+	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
+	if (r) {
+		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
+		return r;
+	}
+	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
+	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
+
+	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
+	if (r) {
+		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
+		return r;
+	}
+
+	ring->ring_lock = &adev->ring_lock;
+	/* Align ring size */
+	rb_bufsz = order_base_2(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
+	ring->ring_size = ring_size;
+	ring->align_mask = align_mask;
+	ring->nop = nop;
+	ring->type = ring_type;
+
+	/* Allocate ring buffer */
+	if (ring->ring_obj == NULL) {
+		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_GTT, 0,
+				     NULL, &ring->ring_obj);
+		if (r) {
+			dev_err(adev->dev, "(%d) ring create failed\n", r);
+			return r;
+		}
+		r = amdgpu_bo_reserve(ring->ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
+					&ring->gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(ring->ring_obj);
+			dev_err(adev->dev, "(%d) ring pin failed\n", r);
+			return r;
+		}
+		r = amdgpu_bo_kmap(ring->ring_obj,
+				       (void **)&ring->ring);
+		amdgpu_bo_unreserve(ring->ring_obj);
+		if (r) {
+			dev_err(adev->dev, "(%d) ring map failed\n", r);
+			return r;
+		}
+	}
+	ring->ptr_mask = (ring->ring_size / 4) - 1;
+	ring->ring_free_dw = ring->ring_size / 4;
+
+	if (amdgpu_debugfs_ring_init(adev, ring)) {
+		DRM_ERROR("Failed to register debugfs file for rings !\n");
+	}
+	amdgpu_ring_lockup_update(ring);
+	return 0;
+}
+
+/**
+ * amdgpu_ring_fini - tear down the driver ring struct.
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
+void amdgpu_ring_fini(struct amdgpu_ring *ring)
+{
+	int r;
+	struct amdgpu_bo *ring_obj;
+
+	if (ring->ring_lock == NULL)
+		return;
+
+	mutex_lock(ring->ring_lock);
+	ring_obj = ring->ring_obj;
+	ring->ready = false;
+	ring->ring = NULL;
+	ring->ring_obj = NULL;
+	mutex_unlock(ring->ring_lock);
+
+	amdgpu_wb_free(ring->adev, ring->fence_offs);
+	amdgpu_wb_free(ring->adev, ring->rptr_offs);
+	amdgpu_wb_free(ring->adev, ring->wptr_offs);
+	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
+
+	if (ring_obj) {
+		r = amdgpu_bo_reserve(ring_obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_kunmap(ring_obj);
+			amdgpu_bo_unpin(ring_obj);
+			amdgpu_bo_unreserve(ring_obj);
+		}
+		amdgpu_bo_unref(&ring_obj);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int roffset = *(int*)node->info_ent->data;
+	struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
+
+	uint32_t rptr, wptr, rptr_next;
+	unsigned count, i, j;
+
+	amdgpu_ring_free_size(ring);
+	count = (ring->ring_size / 4) - ring->ring_free_dw;
+
+	wptr = amdgpu_ring_get_wptr(ring);
+	seq_printf(m, "wptr: 0x%08x [%5d]\n",
+		   wptr, wptr);
+
+	rptr = amdgpu_ring_get_rptr(ring);
+	seq_printf(m, "rptr: 0x%08x [%5d]\n",
+		   rptr, rptr);
+
+	rptr_next = ~0;
+
+	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
+		   ring->wptr, ring->wptr);
+	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
+		   ring->last_semaphore_signal_addr);
+	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n",
+		   ring->last_semaphore_wait_addr);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+
+	if (!ring->ready)
+		return 0;
+
+	/* print 32 dw before current rptr as often it's the last executed
+	 * packet that is the root issue
+	 */
+	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+	for (j = 0; j <= (count + 32); j++) {
+		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+		if (rptr == i)
+			seq_puts(m, " *");
+		if (rptr_next == i)
+			seq_puts(m, " #");
+		seq_puts(m, "\n");
+		i = (i + 1) & ring->ptr_mask;
+	}
+	return 0;
+}
+
+/* TODO: clean this up !*/
+static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
+static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
+static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
+static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
+static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
+
+static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
+	{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
+	{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
+	{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
+	{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
+	{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
+	{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
+	{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
+	{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
+};
+
+#endif
+
+static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
+		struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
+		int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
+		struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
+		unsigned r;
+
+		if (other != ring)
+			continue;
+
+		r = amdgpu_debugfs_add_files(adev, info, 1);
+		if (r)
+			return r;
+	}
+#endif
+	return 0;
+}

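Illustrative note (not part of the patch): amdgpu_ring_free_size() above relies on the ring's dword count being a power of two, so a single mask handles write-pointer wrap-around. A standalone sketch (simplified, not driver code) of the same arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned ring_dw = 256;        /* ring size in dwords, power of two */
	const unsigned ptr_mask = ring_dw - 1;
	unsigned rptr = 10, wptr = 250;      /* GPU read pointer, host write pointer */

	/* free dwords = (rptr + size - wptr) mod size */
	unsigned free_dw = (rptr + ring_dw - wptr) & ptr_mask;
	if (!free_dw)
		free_dw = ring_dw;           /* rptr == wptr means the ring is empty */

	printf("rptr=%u wptr=%u -> %u free dwords\n", rptr, wptr, free_dw);
	return 0;
}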
+ 419 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

@@ -0,0 +1,419 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", we always try to allocate
+ * after the last allocated bo. Principle is that in a linear GPU ring
+ * progression, what is after last is the oldest bo we allocated and thus
+ * the first one that should no longer be in use by the GPU.
+ *
+ * If it's not the case we skip over the bo after last to the closest
+ * done bo if one exists. If none exists and we are not asked to
+ * block we report failure to allocate.
+ *
+ * If we are asked to block we wait on the oldest fences of all
+ * rings. We just wait for any of those fences to complete.
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
+static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
+
+int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
+			      struct amdgpu_sa_manager *sa_manager,
+			      unsigned size, u32 align, u32 domain)
+{
+	int i, r;
+
+	init_waitqueue_head(&sa_manager->wq);
+	sa_manager->bo = NULL;
+	sa_manager->size = size;
+	sa_manager->domain = domain;
+	sa_manager->align = align;
+	sa_manager->hole = &sa_manager->olist;
+	INIT_LIST_HEAD(&sa_manager->olist);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		INIT_LIST_HEAD(&sa_manager->flist[i]);
+	}
+
+	r = amdgpu_bo_create(adev, size, align, true,
+			     domain, 0, NULL, &sa_manager->bo);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
+		return r;
+	}
+
+	return r;
+}
+
+void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
+			       struct amdgpu_sa_manager *sa_manager)
+{
+	struct amdgpu_sa_bo *sa_bo, *tmp;
+
+	if (!list_empty(&sa_manager->olist)) {
+		sa_manager->hole = &sa_manager->olist,
+		amdgpu_sa_bo_try_free(sa_manager);
+		if (!list_empty(&sa_manager->olist)) {
+			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
+		}
+	}
+	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+		amdgpu_sa_bo_remove_locked(sa_bo);
+	}
+	amdgpu_bo_unref(&sa_manager->bo);
+	sa_manager->size = 0;
+}
+
+int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
+			       struct amdgpu_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(adev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	/* map the buffer */
+	r = amdgpu_bo_reserve(sa_manager->bo, false);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
+		return r;
+	}
+	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(sa_manager->bo);
+		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
+		return r;
+	}
+	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	amdgpu_bo_unreserve(sa_manager->bo);
+	return r;
+}
+
+int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
+				 struct amdgpu_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(adev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	r = amdgpu_bo_reserve(sa_manager->bo, false);
+	if (!r) {
+		amdgpu_bo_kunmap(sa_manager->bo);
+		amdgpu_bo_unpin(sa_manager->bo);
+		amdgpu_bo_unreserve(sa_manager->bo);
+	}
+	return r;
+}
+
+static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
+{
+	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
+	if (sa_manager->hole == &sa_bo->olist) {
+		sa_manager->hole = sa_bo->olist.prev;
+	}
+	list_del_init(&sa_bo->olist);
+	list_del_init(&sa_bo->flist);
+	amdgpu_fence_unref(&sa_bo->fence);
+	kfree(sa_bo);
+}
+
+static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
+{
+	struct amdgpu_sa_bo *sa_bo, *tmp;
+
+	if (sa_manager->hole->next == &sa_manager->olist)
+		return;
+
+	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
+	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+		if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
+			return;
+		}
+		amdgpu_sa_bo_remove_locked(sa_bo);
+	}
+}
+
+static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole != &sa_manager->olist) {
+		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
+	}
+	return 0;
+}
+
+static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole->next != &sa_manager->olist) {
+		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
+	}
+	return sa_manager->size;
+}
+
+static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
+				   struct amdgpu_sa_bo *sa_bo,
+				   unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+
+	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
+	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		soffset += wasted;
+
+		sa_bo->manager = sa_manager;
+		sa_bo->soffset = soffset;
+		sa_bo->eoffset = soffset + size;
+		list_add(&sa_bo->olist, sa_manager->hole);
+		INIT_LIST_HEAD(&sa_bo->flist);
+		sa_manager->hole = &sa_bo->olist;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * amdgpu_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly
+ */
+static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
+			    unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		if (!list_empty(&sa_manager->flist[i])) {
+			return true;
+		}
+	}
+
+	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
+	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		return true;
+	}
+
+	return false;
+}
+
+static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
+				   struct amdgpu_fence **fences,
+				   unsigned *tries)
+{
+	struct amdgpu_sa_bo *best_bo = NULL;
+	unsigned i, soffset, best, tmp;
+
+	/* if hole points to the end of the buffer */
+	if (sa_manager->hole->next == &sa_manager->olist) {
+		/* try again with its beginning */
+		sa_manager->hole = &sa_manager->olist;
+		return true;
+	}
+
+	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
+	/* to handle wrap around we add sa_manager->size */
+	best = sa_manager->size * 2;
+	/* go over all fence list and try to find the closest sa_bo
+	 * of the current last
+	 */
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_sa_bo *sa_bo;
+
+		if (list_empty(&sa_manager->flist[i])) {
+			continue;
+		}
+
+		sa_bo = list_first_entry(&sa_manager->flist[i],
+					 struct amdgpu_sa_bo, flist);
+
+		if (!amdgpu_fence_signaled(sa_bo->fence)) {
+			fences[i] = sa_bo->fence;
+			continue;
+		}
+
+		/* limit the number of tries each ring gets */
+		if (tries[i] > 2) {
+			continue;
+		}
+
+		tmp = sa_bo->soffset;
+		if (tmp < soffset) {
+			/* wrap around, pretend it's after */
+			tmp += sa_manager->size;
+		}
+		tmp -= soffset;
+		if (tmp < best) {
+			/* this sa bo is the closest one */
+			best = tmp;
+			best_bo = sa_bo;
+		}
+	}
+
+	if (best_bo) {
+		++tries[best_bo->fence->ring->idx];
+		sa_manager->hole = best_bo->olist.prev;
+
+		/* we know that this one is signaled,
+		   so it's safe to remove it */
+		amdgpu_sa_bo_remove_locked(best_bo);
+		return true;
+	}
+	return false;
+}
+
+int amdgpu_sa_bo_new(struct amdgpu_device *adev,
+		     struct amdgpu_sa_manager *sa_manager,
+		     struct amdgpu_sa_bo **sa_bo,
+		     unsigned size, unsigned align)
+{
+	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	unsigned tries[AMDGPU_MAX_RINGS];
+	int i, r;
+
+	BUG_ON(align > sa_manager->align);
+	BUG_ON(size > sa_manager->size);
+
+	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
+	if ((*sa_bo) == NULL) {
+		return -ENOMEM;
+	}
+	(*sa_bo)->manager = sa_manager;
+	(*sa_bo)->fence = NULL;
+	INIT_LIST_HEAD(&(*sa_bo)->olist);
+	INIT_LIST_HEAD(&(*sa_bo)->flist);
+
+	spin_lock(&sa_manager->wq.lock);
+	do {
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			fences[i] = NULL;
+			tries[i] = 0;
+		}
+
+		do {
+			amdgpu_sa_bo_try_free(sa_manager);
+
+			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
+						   size, align)) {
+				spin_unlock(&sa_manager->wq.lock);
+				return 0;
+			}
+
+			/* see if we can skip over some allocations */
+		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
+
+		spin_unlock(&sa_manager->wq.lock);
+		r = amdgpu_fence_wait_any(adev, fences, false);
+		spin_lock(&sa_manager->wq.lock);
+		/* if we have nothing to wait for block */
+		if (r == -ENOENT) {
+			r = wait_event_interruptible_locked(
+				sa_manager->wq,
+				amdgpu_sa_event(sa_manager, size, align)
+			);
+		}
+
+	} while (!r);
+
+	spin_unlock(&sa_manager->wq.lock);
+	kfree(*sa_bo);
+	*sa_bo = NULL;
+	return r;
+}
+
+void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
+		       struct amdgpu_fence *fence)
+{
+	struct amdgpu_sa_manager *sa_manager;
+
+	if (sa_bo == NULL || *sa_bo == NULL) {
+		return;
+	}
+
+	sa_manager = (*sa_bo)->manager;
+	spin_lock(&sa_manager->wq.lock);
+	if (fence && !amdgpu_fence_signaled(fence)) {
+		(*sa_bo)->fence = amdgpu_fence_ref(fence);
+		list_add_tail(&(*sa_bo)->flist,
+			      &sa_manager->flist[fence->ring->idx]);
+	} else {
+		amdgpu_sa_bo_remove_locked(*sa_bo);
+	}
+	wake_up_all_locked(&sa_manager->wq);
+	spin_unlock(&sa_manager->wq.lock);
+	*sa_bo = NULL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
+				  struct seq_file *m)
+{
+	struct amdgpu_sa_bo *i;
+
+	spin_lock(&sa_manager->wq.lock);
+	list_for_each_entry(i, &sa_manager->olist, olist) {
+		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
+		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
+		if (&i->olist == sa_manager->hole) {
+			seq_printf(m, ">");
+		} else {
+			seq_printf(m, " ");
+		}
+		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
+			   soffset, eoffset, eoffset - soffset);
+		if (i->fence) {
+			seq_printf(m, " protected by 0x%016llx on ring %d",
+				   i->fence->seq, i->fence->ring->idx);
+		}
+		seq_printf(m, "\n");
+	}
+	spin_unlock(&sa_manager->wq.lock);
+}
+#endif

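The hole search above treats the managed range as a ring buffer: candidate offsets that lie numerically before the current hole are shifted up by sa_manager->size before forward distances are compared, so the nearest signaled allocation after the hole wins even across the wrap-around point. A minimal user-space sketch of that distance computation (illustrative only; forward_dist and the sample numbers are invented for this note, they are not amdgpu code):

#include <stdio.h>

/* forward distance from the current hole to a candidate offset, wrapping
 * around at mgr_size, mirroring the arithmetic in amdgpu_sa_bo_next_hole() */
static unsigned forward_dist(unsigned hole_off, unsigned cand_off,
			     unsigned mgr_size)
{
	unsigned tmp = cand_off;

	if (tmp < hole_off)
		tmp += mgr_size;	/* candidate is behind the hole, pretend it's after */
	return tmp - hole_off;
}

int main(void)
{
	const unsigned mgr_size = 1024;
	const unsigned hole_off = 900;
	const unsigned candidates[] = { 100, 950, 800 };
	unsigned best = mgr_size * 2;	/* larger than any possible distance */
	unsigned best_off = 0;
	unsigned i;

	for (i = 0; i < 3; ++i) {
		unsigned d = forward_dist(hole_off, candidates[i], mgr_size);

		if (d < best) {
			best = d;
			best_off = candidates[i];
		}
	}
	/* prints 950: it is 50 bytes ahead, while 100 is 224 ahead after wrapping */
	printf("closest offset after the hole: %u (distance %u)\n",
	       best_off, best);
	return 0;
}
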
+ 102 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c

@@ -0,0 +1,102 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+int amdgpu_semaphore_create(struct amdgpu_device *adev,
+			    struct amdgpu_semaphore **semaphore)
+{
+	int r;
+
+	*semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL);
+	if (*semaphore == NULL) {
+		return -ENOMEM;
+	}
+	r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+			     &(*semaphore)->sa_bo, 8, 8);
+	if (r) {
+		kfree(*semaphore);
+		*semaphore = NULL;
+		return r;
+	}
+	(*semaphore)->waiters = 0;
+	(*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo);
+
+	*((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	return 0;
+}
+
+bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
+				  struct amdgpu_semaphore *semaphore)
+{
+	trace_amdgpu_semaphore_signale(ring->idx, semaphore);
+
+	if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) {
+		--semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
+}
+
+bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
+				struct amdgpu_semaphore *semaphore)
+{
+	trace_amdgpu_semaphore_wait(ring->idx, semaphore);
+
+	if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) {
+		++semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
+}
+
+void amdgpu_semaphore_free(struct amdgpu_device *adev,
+			   struct amdgpu_semaphore **semaphore,
+			   struct amdgpu_fence *fence)
+{
+	if (semaphore == NULL || *semaphore == NULL) {
+		return;
+	}
+	if ((*semaphore)->waiters > 0) {
+		dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
+			" hardware lockup imminent!\n", *semaphore);
+	}
+	amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence);
+	kfree(*semaphore);
+	*semaphore = NULL;
+}

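amdgpu_semaphore_emit_wait() and amdgpu_semaphore_emit_signal() keep a simple balance: every wait packet emitted on one ring must be matched by a signal packet on another, and amdgpu_semaphore_free() warns when the counter is still positive, because an unmatched wait leaves the waiting ring stalled on the 8-byte semaphore value forever. A toy model of that bookkeeping, assuming nothing beyond standard C (the toy_* names are invented for illustration, they are not driver API):

#include <stdio.h>

/* toy model of the waiters balance kept by amdgpu_semaphore_emit_wait(),
 * amdgpu_semaphore_emit_signal() and checked in amdgpu_semaphore_free() */
struct toy_semaphore {
	signed waiters;
};

static void toy_emit_wait(struct toy_semaphore *sem)
{
	++sem->waiters;		/* a ring will block on the semaphore value */
}

static void toy_emit_signal(struct toy_semaphore *sem)
{
	--sem->waiters;		/* another ring will release it */
}

static void toy_free(struct toy_semaphore *sem)
{
	if (sem->waiters > 0)
		fprintf(stderr, "more waiters than signalers, lockup imminent!\n");
}

int main(void)
{
	struct toy_semaphore sem = { 0 };

	toy_emit_wait(&sem);	/* consumer ring waits ...   */
	toy_emit_signal(&sem);	/* ... producer ring signals */
	toy_free(&sem);		/* balanced, no warning      */
	return 0;
}
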
+ 234 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c

@@ -0,0 +1,234 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/**
+ * amdgpu_sync_create - zero init sync object
+ *
+ * @sync: sync object to initialize
+ *
+ * Just clear the sync object for now.
+ */
+void amdgpu_sync_create(struct amdgpu_sync *sync)
+{
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
+		sync->semaphores[i] = NULL;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		sync->sync_to[i] = NULL;
+
+	sync->last_vm_update = NULL;
+}
+
+/**
+ * amdgpu_sync_fence - use the semaphore to sync to a fence
+ *
+ * @sync: sync object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using the semaphore objects
+ */
+void amdgpu_sync_fence(struct amdgpu_sync *sync,
+		       struct amdgpu_fence *fence)
+{
+	struct amdgpu_fence *other;
+
+	if (!fence)
+		return;
+
+	other = sync->sync_to[fence->ring->idx];
+	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
+		amdgpu_fence_later(fence, other));
+	amdgpu_fence_unref(&other);
+
+	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
+		other = sync->last_vm_update;
+		sync->last_vm_update = amdgpu_fence_ref(
+			amdgpu_fence_later(fence, other));
+		amdgpu_fence_unref(&other);
+	}
+}
+
+/**
+ * amdgpu_sync_resv - use the semaphores to sync to a reservation object
+ *
+ * @adev: amdgpu device the fences belong to
+ * @sync: sync object to add fences from reservation object to
+ * @resv: reservation object with embedded fences
+ * @owner: originator of the fences; shared fences with the same owner are skipped
+ *
+ * Sync to the fence using the semaphore objects
+ */
+int amdgpu_sync_resv(struct amdgpu_device *adev,
+		     struct amdgpu_sync *sync,
+		     struct reservation_object *resv,
+		     void *owner)
+{
+	struct reservation_object_list *flist;
+	struct fence *f;
+	struct amdgpu_fence *fence;
+	unsigned i;
+	int r = 0;
+
+	if (resv == NULL)
+		return -EINVAL;
+
+	/* always sync to the exclusive fence */
+	f = reservation_object_get_excl(resv);
+	fence = f ? to_amdgpu_fence(f) : NULL;
+	if (fence && fence->ring->adev == adev)
+		amdgpu_sync_fence(sync, fence);
+	else if (f)
+		r = fence_wait(f, true);
+
+	flist = reservation_object_get_list(resv);
+	if (!flist || r)
+		return r;
+
+	for (i = 0; i < flist->shared_count; ++i) {
+		f = rcu_dereference_protected(flist->shared[i],
+					      reservation_object_held(resv));
+		fence = f ? to_amdgpu_fence(f) : NULL;
+		if (fence && fence->ring->adev == adev) {
+			if (fence->owner != owner ||
+			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
+				amdgpu_sync_fence(sync, fence);
+		} else if (f) {
+			r = fence_wait(f, true);
+			if (r)
+				break;
+		}
+	}
+	return r;
+}
+
+/**
+ * amdgpu_sync_rings - sync ring to all registered fences
+ *
+ * @sync: sync object to use
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
+int amdgpu_sync_rings(struct amdgpu_sync *sync,
+		      struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned count = 0;
+	int i, r;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_fence *fence = sync->sync_to[i];
+		struct amdgpu_semaphore *semaphore;
+		struct amdgpu_ring *other = adev->rings[i];
+
+		/* check if we really need to sync */
+		if (!amdgpu_fence_need_sync(fence, ring))
+			continue;
+
+		/* prevent GPU deadlocks */
+		if (!other->ready) {
+			dev_err(adev->dev, "Syncing to a disabled ring!\n");
+			return -EINVAL;
+		}
+
+		if (count >= AMDGPU_NUM_SYNCS) {
+			/* not enough room, wait manually */
+			r = amdgpu_fence_wait(fence, false);
+			if (r)
+				return r;
+			continue;
+		}
+		r = amdgpu_semaphore_create(adev, &semaphore);
+		if (r)
+			return r;
+
+		sync->semaphores[count++] = semaphore;
+
+		/* allocate enough space for sync command */
+		r = amdgpu_ring_alloc(other, 16);
+		if (r)
+			return r;
+
+		/* emit the signal semaphore */
+		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
+			/* signaling wasn't successful, wait manually */
+			amdgpu_ring_undo(other);
+			r = amdgpu_fence_wait(fence, false);
+			if (r)
+				return r;
+			continue;
+		}
+
+		/* we assume caller has already allocated space on waiters ring */
+		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
+			/* waiting wasn't successful, wait manually */
+			amdgpu_ring_undo(other);
+			r = amdgpu_fence_wait(fence, false);
+			if (r)
+				return r;
+			continue;
+		}
+
+		amdgpu_ring_commit(other);
+		amdgpu_fence_note_sync(fence, ring);
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_sync_free - free the sync object
+ *
+ * @adev: amdgpu_device pointer
+ * @sync: sync object to use
+ * @fence: fence to use for the free
+ *
+ * Free the sync object by freeing all semaphores in it.
+ */
+void amdgpu_sync_free(struct amdgpu_device *adev,
+		      struct amdgpu_sync *sync,
+		      struct amdgpu_fence *fence)
+{
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
+		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		amdgpu_fence_unref(&sync->sync_to[i]);
+
+	amdgpu_fence_unref(&sync->last_vm_update);
+}

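amdgpu_sync_fence() only remembers the latest fence per ring (amdgpu_fence_later() picks the newer of the stored entry and the new one), because fences on a single ring signal in order, so waiting for the newest one implies all older ones have signaled. A user-space sketch of that per-ring deduplication, assuming a monotonically increasing sequence number per ring (the toy_* names are illustrative, not driver API):

#include <stdio.h>

#define TOY_MAX_RINGS 4

/* toy version of the per-ring deduplication in amdgpu_sync_fence(): only the
 * newest fence per ring is kept, since fences on one ring signal in order */
struct toy_sync {
	unsigned seq[TOY_MAX_RINGS];	/* 0 means "nothing to sync to" */
};

static void toy_sync_fence(struct toy_sync *sync, unsigned ring, unsigned seq)
{
	if (seq > sync->seq[ring])
		sync->seq[ring] = seq;
}

int main(void)
{
	struct toy_sync sync = { { 0 } };
	unsigned i;

	toy_sync_fence(&sync, 1, 10);
	toy_sync_fence(&sync, 1, 7);	/* older fence on the same ring: dropped */
	toy_sync_fence(&sync, 2, 3);

	for (i = 0; i < TOY_MAX_RINGS; ++i)
		if (sync.seq[i])
			printf("ring %u: wait for seq %u\n", i, sync.seq[i]);
	return 0;
}
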
+ 552 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -0,0 +1,552 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Michel Dänzer
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+
+/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
+static void amdgpu_do_test_moves(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	struct amdgpu_bo *vram_obj = NULL;
+	struct amdgpu_bo **gtt_obj = NULL;
+	uint64_t gtt_addr, vram_addr;
+	unsigned n, size;
+	int i, r;
+
+	size = 1024 * 1024;
+
+	/* Number of tests =
+	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
+	 */
+	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		if (adev->rings[i])
+			n -= adev->rings[i]->ring_size;
+	if (adev->wb.wb_obj)
+		n -= AMDGPU_GPU_PAGE_SIZE;
+	if (adev->irq.ih.ring_obj)
+		n -= adev->irq.ih.ring_size;
+	n /= size;
+
+	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+	if (!gtt_obj) {
+		DRM_ERROR("Failed to allocate %d pointers\n", n);
+		r = 1;
+		goto out_cleanup;
+	}
+
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     NULL, &vram_obj);
+	if (r) {
+		DRM_ERROR("Failed to create VRAM object\n");
+		goto out_cleanup;
+	}
+	r = amdgpu_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_unref;
+	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+	if (r) {
+		DRM_ERROR("Failed to pin VRAM object\n");
+		goto out_unres;
+	}
+	for (i = 0; i < n; i++) {
+		void *gtt_map, *vram_map;
+		void **gtt_start, **gtt_end;
+		void **vram_start, **vram_end;
+		struct amdgpu_fence *fence = NULL;
+
+		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
+		if (r) {
+			DRM_ERROR("Failed to create GTT object %d\n", i);
+			goto out_lclean;
+		}
+
+		r = amdgpu_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_lclean_unref;
+		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+		if (r) {
+			DRM_ERROR("Failed to pin GTT object %d\n", i);
+			goto out_lclean_unres;
+		}
+
+		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
+		     gtt_start < gtt_end;
+		     gtt_start++)
+			*gtt_start = gtt_start;
+
+		amdgpu_bo_kunmap(gtt_obj[i]);
+
+		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+				       size, NULL, &fence);
+
+		if (r) {
+			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		r = amdgpu_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		amdgpu_fence_unref(&fence);
+
+		r = amdgpu_bo_kmap(vram_obj, &vram_map);
+		if (r) {
+			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		     vram_start = vram_map, vram_end = vram_map + size;
+		     vram_start < vram_end;
+		     gtt_start++, vram_start++) {
+			if (*vram_start != gtt_start) {
+				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
+					  "expected 0x%p (GTT/VRAM offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *vram_start, gtt_start,
+					  (unsigned long long)
+					  (gtt_addr - adev->mc.gtt_start +
+					   (void*)gtt_start - gtt_map),
+					  (unsigned long long)
+					  (vram_addr - adev->mc.vram_start +
+					   (void*)gtt_start - gtt_map));
+				amdgpu_bo_kunmap(vram_obj);
+				goto out_lclean_unpin;
+			}
+			*vram_start = vram_start;
+		}
+
+		amdgpu_bo_kunmap(vram_obj);
+
+		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
+				       size, NULL, &fence);
+
+		if (r) {
+			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		r = amdgpu_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		amdgpu_fence_unref(&fence);
+
+		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
+			goto out_lclean_unpin;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		     vram_start = vram_map, vram_end = vram_map + size;
+		     gtt_start < gtt_end;
+		     gtt_start++, vram_start++) {
+			if (*gtt_start != vram_start) {
+				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
+					  "expected 0x%p (VRAM/GTT offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *gtt_start, vram_start,
+					  (unsigned long long)
+					  (vram_addr - adev->mc.vram_start +
+					   (void*)vram_start - vram_map),
+					  (unsigned long long)
+					  (gtt_addr - adev->mc.gtt_start +
+					   (void*)vram_start - vram_map));
+				amdgpu_bo_kunmap(gtt_obj[i]);
+				goto out_lclean_unpin;
+			}
+		}
+
+		amdgpu_bo_kunmap(gtt_obj[i]);
+
+		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
+			 gtt_addr - adev->mc.gtt_start);
+		continue;
+
+out_lclean_unpin:
+		amdgpu_bo_unpin(gtt_obj[i]);
+out_lclean_unres:
+		amdgpu_bo_unreserve(gtt_obj[i]);
+out_lclean_unref:
+		amdgpu_bo_unref(&gtt_obj[i]);
+out_lclean:
+		for (--i; i >= 0; --i) {
+			amdgpu_bo_unpin(gtt_obj[i]);
+			amdgpu_bo_unreserve(gtt_obj[i]);
+			amdgpu_bo_unref(&gtt_obj[i]);
+		}
+		if (fence)
+			amdgpu_fence_unref(&fence);
+		break;
+	}
+
+	amdgpu_bo_unpin(vram_obj);
+out_unres:
+	amdgpu_bo_unreserve(vram_obj);
+out_unref:
+	amdgpu_bo_unref(&vram_obj);
+out_cleanup:
+	kfree(gtt_obj);
+	if (r) {
+		printk(KERN_WARNING "Error while testing BO move.\n");
+	}
+}
+
+void amdgpu_test_moves(struct amdgpu_device *adev)
+{
+	if (adev->mman.buffer_funcs)
+		amdgpu_do_test_moves(adev);
+}
+
+static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
+					     struct amdgpu_ring *ring,
+					     struct amdgpu_fence **fence)
+{
+	uint32_t handle = ring->idx ^ 0xdeafbeef;
+	int r;
+
+	if (ring == &adev->uvd.ring) {
+		r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
+		if (r) {
+			DRM_ERROR("Failed to get dummy create msg\n");
+			return r;
+		}
+
+		r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
+		if (r) {
+			DRM_ERROR("Failed to get dummy destroy msg\n");
+			return r;
+		}
+
+	} else if (ring == &adev->vce.ring[0] ||
+		   ring == &adev->vce.ring[1]) {
+		r = amdgpu_vce_get_create_msg(ring, handle, NULL);
+		if (r) {
+			DRM_ERROR("Failed to get dummy create msg\n");
+			return r;
+		}
+
+		r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
+		if (r) {
+			DRM_ERROR("Failed to get dummy destroy msg\n");
+			return r;
+		}
+
+	} else {
+		r = amdgpu_ring_lock(ring, 64);
+		if (r) {
+			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
+			return r;
+		}
+		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		amdgpu_ring_unlock_commit(ring);
+	}
+	return 0;
+}
+
+void amdgpu_test_ring_sync(struct amdgpu_device *adev,
+			   struct amdgpu_ring *ringA,
+			   struct amdgpu_ring *ringB)
+{
+	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
+	struct amdgpu_semaphore *semaphore = NULL;
+	int r;
+
+	r = amdgpu_semaphore_create(adev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = amdgpu_ring_lock(ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_wait(ringA, semaphore);
+	amdgpu_ring_unlock_commit(ringA);
+
+	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
+	if (r)
+		goto out_cleanup;
+
+	r = amdgpu_ring_lock(ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_wait(ringA, semaphore);
+	amdgpu_ring_unlock_commit(ringA);
+
+	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
+	if (r)
+		goto out_cleanup;
+
+	mdelay(1000);
+
+	if (amdgpu_fence_signaled(fence1)) {
+		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = amdgpu_ring_lock(ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_signal(ringB, semaphore);
+	amdgpu_ring_unlock_commit(ringB);
+
+	r = amdgpu_fence_wait(fence1, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+	mdelay(1000);
+
+	if (amdgpu_fence_signaled(fence2)) {
+		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = amdgpu_ring_lock(ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_signal(ringB, semaphore);
+	amdgpu_ring_unlock_commit(ringB);
+
+	r = amdgpu_fence_wait(fence2, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 2\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	amdgpu_semaphore_free(adev, &semaphore, NULL);
+
+	if (fence1)
+		amdgpu_fence_unref(&fence1);
+
+	if (fence2)
+		amdgpu_fence_unref(&fence2);
+
+	if (r)
+		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
+			    struct amdgpu_ring *ringA,
+			    struct amdgpu_ring *ringB,
+			    struct amdgpu_ring *ringC)
+{
+	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
+	struct amdgpu_semaphore *semaphore = NULL;
+	bool sigA, sigB;
+	int i, r;
+
+	r = amdgpu_semaphore_create(adev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = amdgpu_ring_lock(ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_wait(ringA, semaphore);
+	amdgpu_ring_unlock_commit(ringA);
+
+	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
+	if (r)
+		goto out_cleanup;
+
+	r = amdgpu_ring_lock(ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_wait(ringB, semaphore);
+	amdgpu_ring_unlock_commit(ringB);
+	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
+	if (r)
+		goto out_cleanup;
+
+	mdelay(1000);
+
+	if (amdgpu_fence_signaled(fenceA)) {
+		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+	if (amdgpu_fence_signaled(fenceB)) {
+		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = amdgpu_ring_lock(ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring C %p\n", ringC);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_signal(ringC, semaphore);
+	amdgpu_ring_unlock_commit(ringC);
+
+	for (i = 0; i < 30; ++i) {
+		mdelay(100);
+		sigA = amdgpu_fence_signaled(fenceA);
+		sigB = amdgpu_fence_signaled(fenceB);
+		if (sigA || sigB)
+			break;
+	}
+
+	if (!sigA && !sigB) {
+		DRM_ERROR("Neither fence A nor B has been signaled\n");
+		goto out_cleanup;
+	} else if (sigA && sigB) {
+		DRM_ERROR("Both fences A and B have been signaled\n");
+		goto out_cleanup;
+	}
+
+	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+	r = amdgpu_ring_lock(ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring C %p\n", ringC);
+		goto out_cleanup;
+	}
+	amdgpu_semaphore_emit_signal(ringC, semaphore);
+	amdgpu_ring_unlock_commit(ringC);
+
+	mdelay(1000);
+
+	r = amdgpu_fence_wait(fenceA, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence A\n");
+		goto out_cleanup;
+	}
+	r = amdgpu_fence_wait(fenceB, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence B\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	amdgpu_semaphore_free(adev, &semaphore, NULL);
+
+	if (fenceA)
+		amdgpu_fence_unref(&fenceA);
+
+	if (fenceB)
+		amdgpu_fence_unref(&fenceB);
+
+	if (r)
+		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
+				      struct amdgpu_ring *ringB)
+{
+	if (ringA == &ringA->adev->vce.ring[0] &&
+	    ringB == &ringB->adev->vce.ring[1])
+		return false;
+
+	return true;
+}
+
+void amdgpu_test_syncing(struct amdgpu_device *adev)
+{
+	int i, j, k;
+
+	for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ringA = adev->rings[i];
+		if (!ringA || !ringA->ready)
+			continue;
+
+		for (j = 0; j < i; ++j) {
+			struct amdgpu_ring *ringB = adev->rings[j];
+			if (!ringB || !ringB->ready)
+				continue;
+
+			if (!amdgpu_test_sync_possible(ringA, ringB))
+				continue;
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+			amdgpu_test_ring_sync(adev, ringA, ringB);
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+			amdgpu_test_ring_sync(adev, ringB, ringA);
+
+			for (k = 0; k < j; ++k) {
+				struct amdgpu_ring *ringC = adev->rings[k];
+				if (!ringC || !ringC->ready)
+					continue;
+
+				if (!amdgpu_test_sync_possible(ringA, ringC))
+					continue;
+
+				if (!amdgpu_test_sync_possible(ringB, ringC))
+					continue;
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+				amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+				amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+				amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+				amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+				amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+				amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
+			}
+		}
+	}
+}

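The move test fills the GTT buffer so that every pointer-sized slot contains its own CPU address, blits it to VRAM, and then checks that each destination slot still names the matching source slot; stale or shifted data shows up as a mismatch together with the offending GTT/VRAM offsets. A self-contained sketch of that self-referencing pattern, using plain memcpy in place of the SDMA copy (illustrative only, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy version of the self-referencing pattern in amdgpu_do_test_moves():
 * every slot stores its own address, so after a copy each destination slot
 * must still name the matching source slot */
int main(void)
{
	const size_t size = 4096;
	const size_t n = size / sizeof(void *);
	void **src = malloc(size);
	void **dst = malloc(size);
	size_t i;

	if (!src || !dst)
		return 1;

	for (i = 0; i < n; ++i)
		src[i] = &src[i];	/* each slot points at itself */

	memcpy(dst, src, size);		/* stands in for the GTT->VRAM blit */

	for (i = 0; i < n; ++i) {
		if (dst[i] != &src[i]) {
			fprintf(stderr, "corrupted copy at slot %zu\n", i);
			return 1;
		}
	}
	printf("copy verified over %zu slots\n", n);
	free(src);
	free(dst);
	return 0;
}
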
+ 208 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

@@ -0,0 +1,208 @@
+#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu
+#define TRACE_INCLUDE_FILE amdgpu_trace
+
+TRACE_EVENT(amdgpu_bo_create,
+	    TP_PROTO(struct amdgpu_bo *bo),
+	    TP_ARGS(bo),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_bo *, bo)
+			     __field(u32, pages)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo;
+			   __entry->pages = bo->tbo.num_pages;
+			   ),
+	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+TRACE_EVENT(amdgpu_cs,
+	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
+	    TP_ARGS(p, i),
+	    TP_STRUCT__entry(
+			     __field(u32, ring)
+			     __field(u32, dw)
+			     __field(u32, fences)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->ring = p->ibs[i].ring->idx;
+			   __entry->dw = p->ibs[i].length_dw;
+			   __entry->fences = amdgpu_fence_count_emitted(
+				p->ibs[i].ring);
+			   ),
+	    TP_printk("ring=%u, dw=%u, fences=%u",
+		      __entry->ring, __entry->dw,
+		      __entry->fences)
+);
+
+TRACE_EVENT(amdgpu_vm_grab_id,
+	    TP_PROTO(unsigned vmid, int ring),
+	    TP_ARGS(vmid, ring),
+	    TP_STRUCT__entry(
+			     __field(u32, vmid)
+			     __field(u32, ring)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->vmid = vmid;
+			   __entry->ring = ring;
+			   ),
+	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+);
+
+TRACE_EVENT(amdgpu_vm_bo_update,
+	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(mapping),
+	    TP_STRUCT__entry(
+			     __field(u64, soffset)
+			     __field(u64, eoffset)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->soffset = mapping->it.start;
+			   __entry->eoffset = mapping->it.last + 1;
+			   __entry->flags = mapping->flags;
+			   ),
+	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+		      __entry->soffset, __entry->eoffset, __entry->flags)
+);
+
+TRACE_EVENT(amdgpu_vm_set_page,
+	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+		     uint32_t incr, uint32_t flags),
+	    TP_ARGS(pe, addr, count, incr, flags),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, addr)
+			     __field(u32, count)
+			     __field(u32, incr)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->addr = addr;
+			   __entry->count = count;
+			   __entry->incr = incr;
+			   __entry->flags = flags;
+			   ),
+	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+		      __entry->pe, __entry->addr, __entry->incr,
+		      __entry->flags, __entry->count)
+);
+
+TRACE_EVENT(amdgpu_vm_flush,
+	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
+	    TP_ARGS(pd_addr, ring, id),
+	    TP_STRUCT__entry(
+			     __field(u64, pd_addr)
+			     __field(u32, ring)
+			     __field(u32, id)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pd_addr = pd_addr;
+			   __entry->ring = ring;
+			   __entry->id = id;
+			   ),
+	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
+		      __entry->pd_addr, __entry->ring, __entry->id)
+);
+
+DECLARE_EVENT_CLASS(amdgpu_fence_request,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(int, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->ring = ring;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, ring=%d, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
+);
+
+DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno)
+);
+
+DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno)
+);
+
+DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+
+	    TP_ARGS(dev, ring, seqno)
+);
+
+DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
+
+	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
+
+	    TP_ARGS(ring, sem),
+
+	    TP_STRUCT__entry(
+			     __field(int, ring)
+			     __field(signed, waiters)
+			     __field(uint64_t, gpu_addr)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->ring = ring;
+			   __entry->waiters = sem->waiters;
+			   __entry->gpu_addr = sem->gpu_addr;
+			   ),
+
+	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+		      __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,
+
+	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
+
+	    TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,
+
+	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
+
+	    TP_ARGS(ring, sem)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>

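Each TRACE_EVENT()/DEFINE_EVENT() above expands to a trace_<event>() inline that the driver calls at the instrumented sites (for example trace_amdgpu_semaphore_signale() in amdgpu_semaphore.c), and the event can be switched on at runtime under /sys/kernel/debug/tracing/events/amdgpu/. A rough user-space analogue of what such a hook boils down to, assuming only standard C (the _toy suffix and the plain bool are illustrative; the kernel uses a static key, not a bool):

#include <stdio.h>
#include <stdbool.h>

/* user-space analogue of what a TRACE_EVENT() expands to: a cheap, normally
 * disabled hook the driver calls unconditionally */
static bool amdgpu_vm_grab_id_enabled;

static void trace_amdgpu_vm_grab_id_toy(unsigned vmid, int ring)
{
	if (!amdgpu_vm_grab_id_enabled)
		return;			/* nearly free while tracing is off */
	printf("amdgpu_vm_grab_id: vmid=%u, ring=%d\n", vmid, ring);
}

int main(void)
{
	trace_amdgpu_vm_grab_id_toy(3, 0);	/* silent: event disabled */
	amdgpu_vm_grab_id_enabled = true;
	trace_amdgpu_vm_grab_id_toy(3, 0);	/* now emits a record */
	return 0;
}
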
+ 9 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c

@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#define CREATE_TRACE_POINTS
+#include "amdgpu_trace.h"

+ 1215 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -0,0 +1,1215 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/debugfs.h>
+#include "amdgpu.h"
+#include "bif/bif_4_1_d.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
+
+static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
+{
+	struct amdgpu_mman *mman;
+	struct amdgpu_device *adev;
+
+	mman = container_of(bdev, struct amdgpu_mman, bdev);
+	adev = container_of(mman, struct amdgpu_device, mman);
+	return adev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	adev->mman.mem_global_referenced = false;
+	global_ref = &adev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &amdgpu_ttm_mem_global_init;
+	global_ref->release = &amdgpu_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	adev->mman.bo_global_ref.mem_glob =
+		adev->mman.mem_global_ref.object;
+	global_ref = &adev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&adev->mman.mem_global_ref);
+		return r;
+	}
+
+	adev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
+{
+	if (adev->mman.mem_global_referenced) {
+		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&adev->mman.mem_global_ref);
+		adev->mman.mem_global_referenced = false;
+	}
+}
+
+static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+				struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_device *adev;
+
+	adev = amdgpu_get_adev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = adev->mc.gtt_start;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+		break;
+	case TTM_PL_VRAM:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = adev->mc.vram_start;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	case AMDGPU_PL_GDS:
+	case AMDGPU_PL_GWS:
+	case AMDGPU_PL_OA:
+		/* On-chip GDS memory*/
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
+		man->available_caching = TTM_PL_FLAG_UNCACHED;
+		man->default_caching = TTM_PL_FLAG_UNCACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	struct amdgpu_bo *rbo;
+	static struct ttm_place placements = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+	};
+
+	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	rbo = container_of(bo, struct amdgpu_bo, tbo);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		if (rbo->adev->mman.buffer_funcs_ring->ready == false)
+			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+		else
+			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
+	default:
+		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+	}
+	*placement = rbo->placement;
+}
+
+static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+
+	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+}
+
+static void amdgpu_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+			bool evict, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem,
+			struct ttm_mem_reg *old_mem)
+{
+	struct amdgpu_device *adev;
+	struct amdgpu_ring *ring;
+	uint64_t old_start, new_start;
+	struct amdgpu_fence *fence;
+	int r;
+
+	adev = amdgpu_get_adev(bo->bdev);
+	ring = adev->mman.buffer_funcs_ring;
+	old_start = old_mem->start << PAGE_SHIFT;
+	new_start = new_mem->start << PAGE_SHIFT;
+
+	switch (old_mem->mem_type) {
+	case TTM_PL_VRAM:
+		old_start += adev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		old_start += adev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	switch (new_mem->mem_type) {
+	case TTM_PL_VRAM:
+		new_start += adev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		new_start += adev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	if (!ring->ready) {
+		DRM_ERROR("Trying to move memory with ring turned off.\n");
+		return -EINVAL;
+	}
+
+	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+
+	r = amdgpu_copy_buffer(ring, old_start, new_start,
+			       new_mem->num_pages * PAGE_SIZE, /* bytes */
+			       bo->resv, &fence);
+	/* FIXME: handle copy error */
+	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+				      evict, no_wait_gpu, new_mem);
+	amdgpu_fence_unref(&fence);
+	return r;
+}
+
+static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct amdgpu_device *adev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	struct ttm_place placements;
+	struct ttm_placement placement;
+	int r;
+
+	adev = amdgpu_get_adev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = 0;
+	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
+	if (unlikely(r)) {
+		return r;
+	}
+
+	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+
+	r = ttm_tt_bind(bo->ttm, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
+static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct amdgpu_device *adev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	struct ttm_placement placement;
+	struct ttm_place placements;
+	int r;
+
+	adev = amdgpu_get_adev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements.fpfn = 0;
+	placements.lpfn = 0;
+	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
+	if (unlikely(r)) {
+		return r;
+	}
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
+static int amdgpu_bo_move(struct ttm_buffer_object *bo,
+			bool evict, bool interruptible,
+			bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
+{
+	struct amdgpu_device *adev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int r;
+
+	adev = amdgpu_get_adev(bo->bdev);
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		amdgpu_move_null(bo, new_mem);
+		return 0;
+	}
+	if ((old_mem->mem_type == TTM_PL_TT &&
+	     new_mem->mem_type == TTM_PL_SYSTEM) ||
+	    (old_mem->mem_type == TTM_PL_SYSTEM &&
+	     new_mem->mem_type == TTM_PL_TT)) {
+		/* bind is enough */
+		amdgpu_move_null(bo, new_mem);
+		return 0;
+	}
+	if (adev->mman.buffer_funcs == NULL ||
+	    adev->mman.buffer_funcs_ring == NULL ||
+	    !adev->mman.buffer_funcs_ring->ready) {
+		/* use memcpy */
+		goto memcpy;
+	}
+
+	if (old_mem->mem_type == TTM_PL_VRAM &&
+	    new_mem->mem_type == TTM_PL_SYSTEM) {
+		r = amdgpu_move_vram_ram(bo, evict, interruptible,
+					no_wait_gpu, new_mem);
+	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+		   new_mem->mem_type == TTM_PL_VRAM) {
+		r = amdgpu_move_ram_vram(bo, evict, interruptible,
+					    no_wait_gpu, new_mem);
+	} else {
+		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+	}
+
+	if (r) {
+memcpy:
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		if (r) {
+			return r;
+		}
+	}
+
+	/* update statistics */
+	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+	return 0;
+}
+
+static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct amdgpu_device *adev = amdgpu_get_adev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = adev->mc.aper_base;
+		mem->bus.is_iomem = true;
+#ifdef __alpha__
+		/*
+		 * Alpha: use bus.addr to hold the ioremap() return,
+		 * so we can modify bus.base below.
+		 */
+		if (mem->placement & TTM_PL_FLAG_WC)
+			mem->bus.addr =
+				ioremap_wc(mem->bus.base + mem->bus.offset,
+					   mem->bus.size);
+		else
+			mem->bus.addr =
+				ioremap_nocache(mem->bus.base + mem->bus.offset,
+						mem->bus.size);
+
+		/*
+		 * Alpha: Use just the bus offset plus
+		 * the hose/domain memory base for bus.base.
+		 * It then can be used to build PTEs for VRAM
+		 * access, as done in ttm_bo_vm_fault().
+		 */
+		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+			adev->ddev->hose->dense_mem_base;
+#endif
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct amdgpu_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct amdgpu_device		*adev;
+	u64				offset;
+	uint64_t			userptr;
+	struct mm_struct		*usermm;
+	uint32_t			userflags;
+};
+
+/* prepare the sg table with the user pages */
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned pinned = 0, nents;
+	int r;
+
+	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (current->mm != gtt->usermm)
+		return -EPERM;
+
+	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
+		/* check that we only pin down anonymous memory
+		   to prevent problems with writeback */
+		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+		struct vm_area_struct *vma;
+
+		vma = find_vma(gtt->usermm, gtt->userptr);
+		if (!vma || vma->vm_file || vma->vm_end < end)
+			return -EPERM;
+	}
+
+	do {
+		unsigned num_pages = ttm->num_pages - pinned;
+		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+		struct page **pages = ttm->pages + pinned;
+
+		r = get_user_pages(current, current->mm, userptr, num_pages,
+				   write, 0, pages, NULL);
+		if (r < 0)
+			goto release_pages;
+
+		pinned += r;
+
+	} while (pinned < ttm->num_pages);
+
+	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+				      ttm->num_pages << PAGE_SHIFT,
+				      GFP_KERNEL);
+	if (r)
+		goto release_sg;
+
+	r = -ENOMEM;
+	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+	if (nents != ttm->sg->nents)
+		goto release_sg;
+
+	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+					 gtt->ttm.dma_address, ttm->num_pages);
+
+	return 0;
+
+release_sg:
+	kfree(ttm->sg);
+
+release_pages:
+	release_pages(ttm->pages, pinned, 0);
+	return r;
+}
+
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+{
+	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	struct sg_page_iter sg_iter;
+
+	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	/* double check that we don't free the table twice */
+	if (!ttm->sg->sgl)
+		return;
+
+	/* free the sg table and pages again */
+	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+	}
+
+	sg_free_table(ttm->sg);
+}
+
+static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+				   struct ttm_mem_reg *bo_mem)
+{
+	struct amdgpu_ttm_tt *gtt = (void*)ttm;
+	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+	int r;
+
+	if (gtt->userptr)
+		amdgpu_ttm_tt_pin_userptr(ttm);
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+
+	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
+	    bo_mem->mem_type == AMDGPU_PL_GWS ||
+	    bo_mem->mem_type == AMDGPU_PL_OA)
+		return -EINVAL;
+
+	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+		ttm->pages, gtt->ttm.dma_address, flags);
+
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+			  ttm->num_pages, (unsigned)gtt->offset);
+		return r;
+	}
+	return 0;
+}
+
+static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
+	if (gtt->adev->gart.ready)
+		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
+
+	if (gtt->userptr)
+		amdgpu_ttm_tt_unpin_userptr(ttm);
+
+	return 0;
+}
+
+static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func amdgpu_backend_func = {
+	.bind = &amdgpu_ttm_backend_bind,
+	.unbind = &amdgpu_ttm_backend_unbind,
+	.destroy = &amdgpu_ttm_backend_destroy,
+};
+
+static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
+				    unsigned long size, uint32_t page_flags,
+				    struct page *dummy_read_page)
+{
+	struct amdgpu_device *adev;
+	struct amdgpu_ttm_tt *gtt;
+
+	adev = amdgpu_get_adev(bdev);
+
+	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.ttm.func = &amdgpu_backend_func;
+	gtt->adev = adev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct amdgpu_device *adev;
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	int r;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (gtt && gtt->userptr) {
+		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+		if (!ttm->sg)
+			return -ENOMEM;
+
+		ttm->page_flags |= TTM_PAGE_FLAG_SG;
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	if (slave && ttm->sg) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 gtt->ttm.dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	adev = amdgpu_get_adev(ttm->bdev);
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate(&gtt->ttm, adev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
+						       0, PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
+			while (i--) {
+				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				gtt->ttm.dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct amdgpu_device *adev;
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (gtt && gtt->userptr) {
+		kfree(ttm->sg);
+		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+		return;
+	}
+
+	if (slave)
+		return;
+
+	adev = amdgpu_get_adev(ttm->bdev);
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (gtt->ttm.dma_address[i]) {
+			pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
+int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+			      uint32_t flags)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return -EINVAL;
+
+	gtt->userptr = addr;
+	gtt->usermm = current->mm;
+	gtt->userflags = flags;
+	return 0;
+}
+
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!gtt->userptr;
+}
+
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+}
+
+uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+				 struct ttm_mem_reg *mem)
+{
+	uint32_t flags = 0;
+
+	if (mem && mem->mem_type != TTM_PL_SYSTEM)
+		flags |= AMDGPU_PTE_VALID;
+
+	if (mem && mem->mem_type == TTM_PL_TT)
+		flags |= AMDGPU_PTE_SYSTEM;
+
+	if (!ttm || ttm->caching_state == tt_cached)
+		flags |= AMDGPU_PTE_SNOOPED;
+
+	if (adev->asic_type >= CHIP_TOPAZ)
+		flags |= AMDGPU_PTE_EXECUTABLE;
+
+	flags |= AMDGPU_PTE_READABLE;
+
+	if (!amdgpu_ttm_tt_is_readonly(ttm))
+		flags |= AMDGPU_PTE_WRITEABLE;
+
+	return flags;
+}
+
+static struct ttm_bo_driver amdgpu_bo_driver = {
+	.ttm_tt_create = &amdgpu_ttm_tt_create,
+	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
+	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
+	.invalidate_caches = &amdgpu_invalidate_caches,
+	.init_mem_type = &amdgpu_init_mem_type,
+	.evict_flags = &amdgpu_evict_flags,
+	.move = &amdgpu_bo_move,
+	.verify_access = &amdgpu_verify_access,
+	.move_notify = &amdgpu_bo_move_notify,
+	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
+	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
+	.io_mem_free = &amdgpu_ttm_io_mem_free,
+};
+
+int amdgpu_ttm_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = amdgpu_ttm_global_init(adev);
+	if (r) {
+		return r;
+	}
+	/* No other users of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&adev->mman.bdev,
+			       adev->mman.bo_global_ref.ref.object,
+			       &amdgpu_bo_driver,
+			       adev->ddev->anon_inode->i_mapping,
+			       DRM_FILE_PAGE_OFFSET,
+			       adev->need_dma32);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	adev->mman.initialized = true;
+	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
+				adev->mc.real_vram_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	/* Change the size here instead of the init above so only lpfn is affected */
+	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+
+	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     NULL, &adev->stollen_vga_memory);
+	if (r) {
+		return r;
+	}
+	r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+	if (r)
+		return r;
+	r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
+	amdgpu_bo_unreserve(adev->stollen_vga_memory);
+	if (r) {
+		amdgpu_bo_unref(&adev->stollen_vga_memory);
+		return r;
+	}
+	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
+	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
+				adev->mc.gtt_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing GTT heap.\n");
+		return r;
+	}
+	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+
+	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
+	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
+	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
+	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
+	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
+	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
+	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
+	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
+	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
+	/* GDS Memory */
+	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
+				adev->gds.mem.total_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing GDS heap.\n");
+		return r;
+	}
+
+	/* GWS */
+	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
+				adev->gds.gws.total_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing gws heap.\n");
+		return r;
+	}
+
+	/* OA */
+	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
+				adev->gds.oa.total_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing oa heap.\n");
+		return r;
+	}
+
+	r = amdgpu_ttm_debugfs_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+	return 0;
+}
+
+void amdgpu_ttm_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (!adev->mman.initialized)
+		return;
+	amdgpu_ttm_debugfs_fini(adev);
+	if (adev->stollen_vga_memory) {
+		r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+		if (r == 0) {
+			amdgpu_bo_unpin(adev->stollen_vga_memory);
+			amdgpu_bo_unreserve(adev->stollen_vga_memory);
+		}
+		amdgpu_bo_unref(&adev->stollen_vga_memory);
+	}
+	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
+	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
+	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
+	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
+	ttm_bo_device_release(&adev->mman.bdev);
+	amdgpu_gart_fini(adev);
+	amdgpu_ttm_global_fini(adev);
+	adev->mman.initialized = false;
+	DRM_INFO("amdgpu: ttm finalized\n");
+}
+
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
+{
+	struct ttm_mem_type_manager *man;
+
+	if (!adev->mman.initialized)
+		return;
+
+	man = &adev->mman.bdev.man[TTM_PL_VRAM];
+	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
+	man->size = size >> PAGE_SHIFT;
+}
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct amdgpu_device *adev;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return -EINVAL;
+
+	file_priv = filp->private_data;
+	adev = file_priv->minor->dev->dev_private;
+	if (adev == NULL)
+		return -EINVAL;
+
+	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+}
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+		       uint64_t src_offset,
+		       uint64_t dst_offset,
+		       uint32_t byte_count,
+		       struct reservation_object *resv,
+		       struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_sync sync;
+	uint32_t max_bytes;
+	unsigned num_loops, num_dw;
+	unsigned i;
+	int r;
+
+	/* sync other rings */
+	amdgpu_sync_create(&sync);
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &sync, resv, false);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			amdgpu_sync_free(adev, &sync, NULL);
+			return r;
+		}
+	}
+
+	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
+	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
+
+	/* for fence and sync */
+	num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
+
+	r = amdgpu_ring_lock(ring, num_dw);
+	if (r) {
+		DRM_ERROR("ring lock failed (%d).\n", r);
+		amdgpu_sync_free(adev, &sync, NULL);
+		return r;
+	}
+
+	amdgpu_sync_rings(&sync, ring);
+
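+	/* split the copy into chunks no bigger than one copy packet can move */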
+	for (i = 0; i < num_loops; i++) {
+		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+		amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
+					cur_size_in_bytes);
+
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+		byte_count -= cur_size_in_bytes;
+	}
+
+	r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
+	if (r) {
+		amdgpu_ring_unlock_undo(ring);
+		amdgpu_sync_free(adev, &sync, NULL);
+		return r;
+	}
+
+	amdgpu_ring_unlock_commit(ring);
+	amdgpu_sync_free(adev, &sync, *fence);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	unsigned ttm_pl = *(int *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
+	int ret;
+	struct ttm_bo_global *glob = adev->mman.bdev.glob;
+
+	spin_lock(&glob->lru_lock);
+	ret = drm_mm_dump_table(m, mm);
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+
+static int ttm_pl_vram = TTM_PL_VRAM;
+static int ttm_pl_tt = TTM_PL_TT;
+
+static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
+	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
+	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
+	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
+#ifdef CONFIG_SWIOTLB
+	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
+#endif
+};
+
+static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
+				    size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	while (size) {
+		unsigned long flags;
+		uint32_t value;
+
+		if (*pos >= adev->mc.mc_vram_size)
+			return result;
+
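+		/* read VRAM one dword at a time through the MM_INDEX/MM_DATA window */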
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+		WREG32(mmMM_INDEX_HI, *pos >> 31);
+		value = RREG32(mmMM_DATA);
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
+static const struct file_operations amdgpu_ttm_vram_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_ttm_vram_read,
+	.llseek = default_llseek
+};
+
+static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
+				   size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+
+	while (size) {
+		loff_t p = *pos / PAGE_SIZE;
+		unsigned off = *pos & ~PAGE_MASK;
+		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
+		struct page *page;
+		void *ptr;
+
+		if (p >= adev->gart.num_cpu_pages)
+			return result;
+
+		page = adev->gart.pages[p];
+		if (page) {
+			ptr = kmap(page);
+			ptr += off;
+
+			r = copy_to_user(buf, ptr, cur_size);
+			kunmap(adev->gart.pages[p]);
+		} else
+			r = clear_user(buf, cur_size);
+
+		if (r)
+			return -EFAULT;
+
+		result += cur_size;
+		buf += cur_size;
+		*pos += cur_size;
+		size -= cur_size;
+	}
+
+	return result;
+}
+
+static const struct file_operations amdgpu_ttm_gtt_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_ttm_gtt_read,
+	.llseek = default_llseek
+};
+
+#endif
+
+static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned count;
+
+	struct drm_minor *minor = adev->ddev->primary;
+	struct dentry *ent, *root = minor->debugfs_root;
+
+	ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
+				  adev, &amdgpu_ttm_vram_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+	adev->mman.vram = ent;
+
+	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
+				  adev, &amdgpu_ttm_gtt_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+	i_size_write(ent->d_inode, adev->mc.gtt_size);
+	adev->mman.gtt = ent;
+
+	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
+
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+		--count;
+#endif
+
+	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
+#else
+
+	return 0;
+#endif
+}
+
+static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+	debugfs_remove(adev->mman.vram);
+	adev->mman.vram = NULL;
+
+	debugfs_remove(adev->mman.gtt);
+	adev->mman.gtt = NULL;
+#endif
+}

+ 317 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c

@@ -0,0 +1,317 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+
+static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+{
+	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
+	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
+	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
+	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
+	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
+	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
+	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
+	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
+		  le32_to_cpu(hdr->ucode_array_offset_bytes));
+	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
+}
+
+void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("MC\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct mc_firmware_header_v1_0 *mc_hdr =
+			container_of(hdr, struct mc_firmware_header_v1_0, header);
+
+		DRM_DEBUG("io_debug_size_bytes: %u\n",
+			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
+		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
+			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
+	} else {
+		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("SMC\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct smc_firmware_header_v1_0 *smc_hdr =
+			container_of(hdr, struct smc_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
+	} else {
+		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("GFX\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct gfx_firmware_header_v1_0 *gfx_hdr =
+			container_of(hdr, struct gfx_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(gfx_hdr->ucode_feature_version));
+		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
+		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+	} else {
+		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("RLC\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct rlc_firmware_header_v1_0 *rlc_hdr =
+			container_of(hdr, struct rlc_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(rlc_hdr->ucode_feature_version));
+		DRM_DEBUG("save_and_restore_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
+		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+		DRM_DEBUG("master_pkt_description_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
+	} else if (version_major == 2) {
+		const struct rlc_firmware_header_v2_0 *rlc_hdr =
+			container_of(hdr, struct rlc_firmware_header_v2_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(rlc_hdr->ucode_feature_version));
+		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
+		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
+		DRM_DEBUG("save_and_restore_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
+		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+		DRM_DEBUG("reg_restore_list_size: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_restore_list_size));
+		DRM_DEBUG("reg_list_format_start: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_format_start));
+		DRM_DEBUG("reg_list_format_separate_start: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
+		DRM_DEBUG("starting_offsets_start: %u\n",
+			  le32_to_cpu(rlc_hdr->starting_offsets_start));
+		DRM_DEBUG("reg_list_format_size_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
+		DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+		DRM_DEBUG("reg_list_size_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_size_bytes));
+		DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+		DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
+		DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
+		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+		DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+			  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+	} else {
+		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
+	}
+}
+
+void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
+{
+	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+	DRM_DEBUG("SDMA\n");
+	amdgpu_ucode_print_common_hdr(hdr);
+
+	if (version_major == 1) {
+		const struct sdma_firmware_header_v1_0 *sdma_hdr =
+			container_of(hdr, struct sdma_firmware_header_v1_0, header);
+
+		DRM_DEBUG("ucode_feature_version: %u\n",
+			  le32_to_cpu(sdma_hdr->ucode_feature_version));
+		DRM_DEBUG("ucode_change_version: %u\n",
+			  le32_to_cpu(sdma_hdr->ucode_change_version));
+		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
+		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
+		if (version_minor >= 1) {
+			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
+				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
+			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
+		}
+	} else {
+		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
+			  version_major, version_minor);
+	}
+}
+
+int amdgpu_ucode_validate(const struct firmware *fw)
+{
+	const struct common_firmware_header *hdr =
+		(const struct common_firmware_header *)fw->data;
+
+	if (fw->size == le32_to_cpu(hdr->size_bytes))
+		return 0;
+
+	return -EINVAL;
+}
+
+bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
+				uint16_t hdr_major, uint16_t hdr_minor)
+{
+	if ((hdr->common.header_version_major == hdr_major) &&
+		(hdr->common.header_version_minor == hdr_minor))
+		return false;
+	return true;
+}
+
+static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode,
+				uint64_t mc_addr, void *kptr)
+{
+	const struct common_firmware_header *header = NULL;
+
+	if (NULL == ucode->fw)
+		return 0;
+
+	ucode->mc_addr = mc_addr;
+	ucode->kaddr = kptr;
+
+	header = (const struct common_firmware_header *)ucode->fw->data;
+	memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
+		le32_to_cpu(header->ucode_array_offset_bytes)),
+		le32_to_cpu(header->ucode_size_bytes));
+
+	return 0;
+}
+
+int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
+{
+	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
+	uint64_t fw_mc_addr;
+	void *fw_buf_ptr = NULL;
+	uint64_t fw_offset = 0;
+	int i, err;
+	struct amdgpu_firmware_info *ucode = NULL;
+	const struct common_firmware_header *header = NULL;
+
+	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+			AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
+	if (err) {
+		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	err = amdgpu_bo_reserve(*bo, false);
+	if (err) {
+		amdgpu_bo_unref(bo);
+		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
+		goto failed;
+	}
+
+	err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
+	if (err) {
+		amdgpu_bo_unreserve(*bo);
+		amdgpu_bo_unref(bo);
+		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
+		goto failed;
+	}
+
+	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
+	if (err) {
+		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
+		amdgpu_bo_unpin(*bo);
+		amdgpu_bo_unreserve(*bo);
+		amdgpu_bo_unref(bo);
+		goto failed;
+	}
+
+	amdgpu_bo_unreserve(*bo);
+
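+	/* copy each requested firmware image into the BO at page-aligned offsets */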
+	fw_offset = 0;
+	for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
+		ucode = &adev->firmware.ucode[i];
+		if (ucode->fw) {
+			header = (const struct common_firmware_header *)ucode->fw->data;
+			amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset,
+						    fw_buf_ptr + fw_offset);
+			fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+		}
+	}
+
+failed:
+	if (err)
+		adev->firmware.smu_load = false;
+
+	return err;
+}
+
+int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
+{
+	int i;
+	struct amdgpu_firmware_info *ucode = NULL;
+
+	for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
+		ucode = &adev->firmware.ucode[i];
+		if (ucode->fw) {
+			ucode->mc_addr = 0;
+			ucode->kaddr = NULL;
+		}
+	}
+	amdgpu_bo_unref(&adev->firmware.fw_buf);
+	adev->firmware.fw_buf = NULL;
+
+	return 0;
+}

+ 176 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h

@@ -0,0 +1,176 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_UCODE_H__
+#define __AMDGPU_UCODE_H__
+
+struct common_firmware_header {
+	uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
+	uint32_t header_size_bytes; /* size of just the header in bytes */
+	uint16_t header_version_major; /* header version */
+	uint16_t header_version_minor; /* header version */
+	uint16_t ip_version_major; /* IP version */
+	uint16_t ip_version_minor; /* IP version */
+	uint32_t ucode_version;
+	uint32_t ucode_size_bytes; /* size of ucode in bytes */
+	uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
+	uint32_t crc32;  /* crc32 checksum of the payload */
+};
+
+/* version_major=1, version_minor=0 */
+struct mc_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t io_debug_size_bytes; /* size of debug array in dwords */
+	uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct smc_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_start_addr;
+};
+
+/* version_major=1, version_minor=0 */
+struct gfx_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t jt_offset; /* jt location */
+	uint32_t jt_size;  /* size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct rlc_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t save_and_restore_offset;
+	uint32_t clear_state_descriptor_offset;
+	uint32_t avail_scratch_ram_locations;
+	uint32_t master_pkt_description_offset;
+};
+
+/* version_major=2, version_minor=0 */
+struct rlc_firmware_header_v2_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t jt_offset; /* jt location */
+	uint32_t jt_size;  /* size of jt */
+	uint32_t save_and_restore_offset;
+	uint32_t clear_state_descriptor_offset;
+	uint32_t avail_scratch_ram_locations;
+	uint32_t reg_restore_list_size;
+	uint32_t reg_list_format_start;
+	uint32_t reg_list_format_separate_start;
+	uint32_t starting_offsets_start;
+	uint32_t reg_list_format_size_bytes; /* size of reg list format array in bytes */
+	uint32_t reg_list_format_array_offset_bytes; /* payload offset from the start of the header */
+	uint32_t reg_list_size_bytes; /* size of reg list array in bytes */
+	uint32_t reg_list_array_offset_bytes; /* payload offset from the start of the header */
+	uint32_t reg_list_format_separate_size_bytes; /* size of reg list format array in bytes */
+	uint32_t reg_list_format_separate_array_offset_bytes; /* payload offset from the start of the header */
+	uint32_t reg_list_separate_size_bytes; /* size of reg list array in bytes */
+	uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct sdma_firmware_header_v1_0 {
+	struct common_firmware_header header;
+	uint32_t ucode_feature_version;
+	uint32_t ucode_change_version;
+	uint32_t jt_offset; /* jt location */
+	uint32_t jt_size; /* size of jt */
+};
+
+/* version_major=1, version_minor=1 */
+struct sdma_firmware_header_v1_1 {
+	struct sdma_firmware_header_v1_0 v1_0;
+	uint32_t digest_size;
+};
+
+/* header is fixed size */
+union amdgpu_firmware_header {
+	struct common_firmware_header common;
+	struct mc_firmware_header_v1_0 mc;
+	struct smc_firmware_header_v1_0 smc;
+	struct gfx_firmware_header_v1_0 gfx;
+	struct rlc_firmware_header_v1_0 rlc;
+	struct rlc_firmware_header_v2_0 rlc_v2_0;
+	struct sdma_firmware_header_v1_0 sdma;
+	struct sdma_firmware_header_v1_1 sdma_v1_1;
+	uint8_t raw[0x100];
+};
+
+/*
+ * fw loading support
+ */
+enum AMDGPU_UCODE_ID {
+	AMDGPU_UCODE_ID_SDMA0 = 0,
+	AMDGPU_UCODE_ID_SDMA1,
+	AMDGPU_UCODE_ID_CP_CE,
+	AMDGPU_UCODE_ID_CP_PFP,
+	AMDGPU_UCODE_ID_CP_ME,
+	AMDGPU_UCODE_ID_CP_MEC1,
+	AMDGPU_UCODE_ID_CP_MEC2,
+	AMDGPU_UCODE_ID_RLC_G,
+	AMDGPU_UCODE_ID_MAXIMUM,
+};
+
+/* engine firmware status */
+enum AMDGPU_UCODE_STATUS {
+	AMDGPU_UCODE_STATUS_INVALID,
+	AMDGPU_UCODE_STATUS_NOT_LOADED,
+	AMDGPU_UCODE_STATUS_LOADED,
+};
+
+/* conform to smu_ucode_xfer_cz.h */
+#define AMDGPU_SDMA0_UCODE_LOADED	0x00000001
+#define AMDGPU_SDMA1_UCODE_LOADED	0x00000002
+#define AMDGPU_CPCE_UCODE_LOADED	0x00000004
+#define AMDGPU_CPPFP_UCODE_LOADED	0x00000008
+#define AMDGPU_CPME_UCODE_LOADED	0x00000010
+#define AMDGPU_CPMEC1_UCODE_LOADED	0x00000020
+#define AMDGPU_CPMEC2_UCODE_LOADED	0x00000040
+#define AMDGPU_CPRLC_UCODE_LOADED	0x00000100
+
+/* amdgpu firmware info */
+struct amdgpu_firmware_info {
+	/* ucode ID */
+	enum AMDGPU_UCODE_ID ucode_id;
+	/* request_firmware */
+	const struct firmware *fw;
+	/* starting mc address */
+	uint64_t mc_addr;
+	/* kernel linear address */
+	void *kaddr;
+};
+
+void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+int amdgpu_ucode_validate(const struct firmware *fw);
+bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
+				uint16_t hdr_major, uint16_t hdr_minor);
+int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
+int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
+
+#endif

+ 984 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -0,0 +1,984 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_uvd.h"
+#include "cikd.h"
+#include "uvd/uvd_4_2_d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS	1000
+
+/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
+#define FIRMWARE_KABINI 	"radeon/kabini_uvd.bin"
+#define FIRMWARE_KAVERI 	"radeon/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII 	"radeon/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
+#endif
+#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
+#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
+
+/**
+ * amdgpu_uvd_cs_ctx - Command submission parser context
+ *
+ * Used for emulating virtual memory support on UVD 4.2.
+ */
+struct amdgpu_uvd_cs_ctx {
+	struct amdgpu_cs_parser *parser;
+	unsigned reg, count;
+	unsigned data0, data1;
+	unsigned idx;
+	unsigned ib_idx;
+
+	/* does the IB have a msg command */
+	bool has_msg_cmd;
+
+	/* minimum buffer sizes */
+	unsigned *buf_sizes;
+};
+
+#ifdef CONFIG_DRM_AMDGPU_CIK
+MODULE_FIRMWARE(FIRMWARE_BONAIRE);
+MODULE_FIRMWARE(FIRMWARE_KABINI);
+MODULE_FIRMWARE(FIRMWARE_KAVERI);
+MODULE_FIRMWARE(FIRMWARE_HAWAII);
+MODULE_FIRMWARE(FIRMWARE_MULLINS);
+#endif
+MODULE_FIRMWARE(FIRMWARE_TONGA);
+MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+
+static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
+static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+
+int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+{
+	unsigned long bo_size;
+	const char *fw_name;
+	const struct common_firmware_header *hdr;
+	unsigned version_major, version_minor, family_id;
+	int i, r;
+
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
+
+	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+		fw_name = FIRMWARE_BONAIRE;
+		break;
+	case CHIP_KABINI:
+		fw_name = FIRMWARE_KABINI;
+		break;
+	case CHIP_KAVERI:
+		fw_name = FIRMWARE_KAVERI;
+		break;
+	case CHIP_HAWAII:
+		fw_name = FIRMWARE_HAWAII;
+		break;
+	case CHIP_MULLINS:
+		fw_name = FIRMWARE_MULLINS;
+		break;
+#endif
+	case CHIP_TONGA:
+		fw_name = FIRMWARE_TONGA;
+		break;
+	case CHIP_CARRIZO:
+		fw_name = FIRMWARE_CARRIZO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
+	if (r) {
+		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
+			fw_name);
+		return r;
+	}
+
+	r = amdgpu_ucode_validate(adev->uvd.fw);
+	if (r) {
+		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
+			fw_name);
+		release_firmware(adev->uvd.fw);
+		adev->uvd.fw = NULL;
+		return r;
+	}
+
+	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+		version_major, version_minor, family_id);
+
+	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+	if (r) {
+		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
+			  &adev->uvd.gpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
+	if (r) {
+		dev_err(adev->dev, "(%d) UVD map failed\n", r);
+		return r;
+	}
+
+	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		atomic_set(&adev->uvd.handles[i], 0);
+		adev->uvd.filp[i] = NULL;
+	}
+
+	/* from UVD v5.0 on, the HW addressing capacity increased to 64 bits */
+	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+		adev->uvd.address_64_bit = true;
+
+	return 0;
+}
+
+int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->uvd.vcpu_bo == NULL)
+		return 0;
+
+	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+	if (!r) {
+		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+	}
+
+	amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+
+	amdgpu_ring_fini(&adev->uvd.ring);
+
+	release_firmware(adev->uvd.fw);
+
+	return 0;
+}
+
+int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+{
+	unsigned size;
+	void *ptr;
+	const struct common_firmware_header *hdr;
+	int i;
+
+	if (adev->uvd.vcpu_bo == NULL)
+		return 0;
+
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&adev->uvd.handles[i]))
+			break;
+
+	if (i == AMDGPU_MAX_UVD_HANDLES)
+		return 0;
+
+	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+
+	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+	size -= le32_to_cpu(hdr->ucode_size_bytes);
+
+	ptr = adev->uvd.cpu_addr;
+	ptr += le32_to_cpu(hdr->ucode_size_bytes);
+
+	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!adev->uvd.saved_bo)
+		return -ENOMEM;
+	memcpy(adev->uvd.saved_bo, ptr, size);
+
+	return 0;
+}
+
+int amdgpu_uvd_resume(struct amdgpu_device *adev)
+{
+	unsigned size;
+	void *ptr;
+	const struct common_firmware_header *hdr;
+	unsigned offset;
+
+	if (adev->uvd.vcpu_bo == NULL)
+		return -EINVAL;
+
+	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+		(adev->uvd.fw->size) - offset);
+
+	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+	size -= le32_to_cpu(hdr->ucode_size_bytes);
+	ptr = adev->uvd.cpu_addr;
+	ptr += le32_to_cpu(hdr->ucode_size_bytes);
+
+	if (adev->uvd.saved_bo != NULL) {
+		memcpy(ptr, adev->uvd.saved_bo, size);
+		kfree(adev->uvd.saved_bo);
+		adev->uvd.saved_bo = NULL;
+	} else
+		memset(ptr, 0, size);
+
+	return 0;
+}
+
+void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+{
+	struct amdgpu_ring *ring = &adev->uvd.ring;
+	int i, r;
+
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+		if (handle != 0 && adev->uvd.filp[i] == filp) {
+			struct amdgpu_fence *fence;
+
+			amdgpu_uvd_note_usage(adev);
+
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
+
+			amdgpu_fence_wait(fence, false);
+			amdgpu_fence_unref(&fence);
+
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
+		}
+	}
+}
+
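+/* UVD without 64-bit addressing can only reach buffers inside a single 256MB
+ * window, so restrict the BO placement to the first 256MB of VRAM */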
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+{
+	int i;
+	for (i = 0; i < rbo->placement.num_placement; ++i) {
+		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	}
+}
+
+/**
+ * amdgpu_uvd_cs_pass1 - first parsing round
+ *
+ * @ctx: UVD parser context
+ *
+ * Make sure UVD message and feedback buffers are in VRAM and
+ * that nobody violates a 256MB boundary.
+ */
+static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_bo *bo;
+	uint32_t cmd, lo, hi;
+	uint64_t addr;
+	int r = 0;
+
+	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
+	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
+
+	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
+	if (mapping == NULL) {
+		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+		return -EINVAL;
+	}
+
+	if (!ctx->parser->adev->uvd.address_64_bit) {
+		/* check if it's a message or feedback command */
+		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
+		if (cmd == 0x0 || cmd == 0x3) {
+			/* yes, force it into VRAM */
+			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+			amdgpu_ttm_placement_from_domain(bo, domain);
+		}
+		amdgpu_uvd_force_into_uvd_segment(bo);
+
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	}
+
+	return r;
+}
+
+/**
+ * amdgpu_uvd_cs_msg_decode - handle UVD decode message
+ *
+ * @msg: pointer to message structure
+ * @buf_sizes: returned buffer sizes
+ *
+ * Peek into the decode message and calculate the necessary buffer sizes.
+ */
+static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+	unsigned stream_type = msg[4];
+	unsigned width = msg[6];
+	unsigned height = msg[7];
+	unsigned dpb_size = msg[9];
+	unsigned pitch = msg[28];
+	unsigned level = msg[57];
+
+	unsigned width_in_mb = width / 16;
+	unsigned height_in_mb = ALIGN(height / 16, 2);
+	unsigned fs_in_mb = width_in_mb * height_in_mb;
+
+	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+
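+	/* 4:2:0 target: luma plane plus half-sized chroma, aligned to 1KB */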
+	image_size = width * height;
+	image_size += image_size / 2;
+	image_size = ALIGN(image_size, 1024);
+
+	switch (stream_type) {
+	case 0: /* H264 */
+	case 7: /* H264 Perf */
+		switch(level) {
+		case 30:
+			num_dpb_buffer = 8100 / fs_in_mb;
+			break;
+		case 31:
+			num_dpb_buffer = 18000 / fs_in_mb;
+			break;
+		case 32:
+			num_dpb_buffer = 20480 / fs_in_mb;
+			break;
+		case 41:
+			num_dpb_buffer = 32768 / fs_in_mb;
+			break;
+		case 42:
+			num_dpb_buffer = 34816 / fs_in_mb;
+			break;
+		case 50:
+			num_dpb_buffer = 110400 / fs_in_mb;
+			break;
+		case 51:
+			num_dpb_buffer = 184320 / fs_in_mb;
+			break;
+		default:
+			num_dpb_buffer = 184320 / fs_in_mb;
+			break;
+		}
+		num_dpb_buffer++;
+		if (num_dpb_buffer > 17)
+			num_dpb_buffer = 17;
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * num_dpb_buffer;
+
+		/* macroblock context buffer */
+		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;
+
+		/* IT surface buffer */
+		min_dpb_size += width_in_mb * height_in_mb * 32;
+		break;
+
+	case 1: /* VC1 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+
+		/* CONTEXT_BUFFER */
+		min_dpb_size += width_in_mb * height_in_mb * 128;
+
+		/* IT surface buffer */
+		min_dpb_size += width_in_mb * 64;
+
+		/* DB surface buffer */
+		min_dpb_size += width_in_mb * 128;
+
+		/* BP */
+		tmp = max(width_in_mb, height_in_mb);
+		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+		break;
+
+	case 3: /* MPEG2 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+		break;
+
+	case 4: /* MPEG4 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+
+		/* CM */
+		min_dpb_size += width_in_mb * height_in_mb * 64;
+
+		/* IT surface buffer */
+		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+		break;
+
+	case 16: /* H265 */
+		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
+		image_size = ALIGN(image_size, 256);
+
+		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
+		min_dpb_size = image_size * num_dpb_buffer;
+		break;
+
+	default:
+		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+		return -EINVAL;
+	}
+
+	if (width > pitch) {
+		DRM_ERROR("Invalid UVD decoding target pitch!\n");
+		return -EINVAL;
+	}
+
+	if (dpb_size < min_dpb_size) {
+		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+			  dpb_size, min_dpb_size);
+		return -EINVAL;
+	}
+
+	buf_sizes[0x1] = dpb_size;
+	buf_sizes[0x2] = image_size;
+	return 0;
+}
+
+/**
+ * amdgpu_uvd_cs_msg - handle UVD message
+ *
+ * @ctx: UVD parser context
+ * @bo: buffer object containing the message
+ * @offset: offset into the buffer object
+ *
+ * Peek into the UVD message and extract the session id.
+ * Make sure that we don't open up too many sessions.
+ */
+static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+			     struct amdgpu_bo *bo, unsigned offset)
+{
+	struct amdgpu_device *adev = ctx->parser->adev;
+	int32_t *msg, msg_type, handle;
+	struct fence *f;
+	void *ptr;
+
+	int i, r;
+
+	if (offset & 0x3F) {
+		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		return -EINVAL;
+	}
+
+	f = reservation_object_get_excl(bo->tbo.resv);
+	if (f) {
+		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+			return r;
+		}
+	}
+
+	r = amdgpu_bo_kmap(bo, &ptr);
+	if (r) {
+		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+		return r;
+	}
+
+	msg = ptr + offset;
+
+	msg_type = msg[1];
+	handle = msg[2];
+
+	if (handle == 0) {
+		DRM_ERROR("Invalid UVD handle!\n");
+		amdgpu_bo_kunmap(bo);
+		return -EINVAL;
+	}
+
+	if (msg_type == 1) {
+		/* it's a decode msg, calc buffer sizes */
+		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
+		amdgpu_bo_kunmap(bo);
+		if (r)
+			return r;
+
+	} else if (msg_type == 2) {
+		/* it's a destroy msg, free the handle */
+		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
+		amdgpu_bo_kunmap(bo);
+		return 0;
+	} else {
+		/* it's a create msg */
+		amdgpu_bo_kunmap(bo);
+
+		if (msg_type != 0) {
+			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+			return -EINVAL;
+		}
+
+		/* it's a create msg, no special handling needed */
+	}
+
+	/* create or decode, validate the handle */
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		if (atomic_read(&adev->uvd.handles[i]) == handle)
+			return 0;
+	}
+
+	/* handle not found, try to alloc a new one */
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+			adev->uvd.filp[i] = ctx->parser->filp;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("No more free UVD handles!\n");
+	return -EINVAL;
+}
+
+/**
+ * amdgpu_uvd_cs_pass2 - second parsing round
+ *
+ * @ctx: UVD parser context
+ *
+ * Patch buffer addresses, make sure buffer sizes are correct.
+ */
+static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_bo *bo;
+	struct amdgpu_ib *ib;
+	uint32_t cmd, lo, hi;
+	uint64_t start, end;
+	uint64_t addr;
+	int r;
+
+	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
+	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
+
+	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
+	if (mapping == NULL)
+		return -EINVAL;
+
+	start = amdgpu_bo_gpu_offset(bo);
+
+	end = (mapping->it.last + 1 - mapping->it.start);
+	end = end * AMDGPU_GPU_PAGE_SIZE + start;
+
+	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+	start += addr;
+
+	ib = &ctx->parser->ibs[ctx->ib_idx];
+	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
+	ib->ptr[ctx->data1] = start >> 32;
+
+	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
+	if (cmd < 0x4) {
+		if ((end - start) < ctx->buf_sizes[cmd]) {
+			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
+				  (unsigned)(end - start),
+				  ctx->buf_sizes[cmd]);
+			return -EINVAL;
+		}
+
+	} else if ((cmd != 0x100) && (cmd != 0x204)) {
+		DRM_ERROR("invalid UVD command %X!\n", cmd);
+		return -EINVAL;
+	}
+
+	if (!ctx->parser->adev->uvd.address_64_bit) {
+		if ((start >> 28) != ((end - 1) >> 28)) {
+			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+				  start, end);
+			return -EINVAL;
+		}
+
+		if ((cmd == 0 || cmd == 0x3) &&
+		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
+			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+				  start, end);
+			return -EINVAL;
+		}
+	}
+
+	if (cmd == 0) {
+		ctx->has_msg_cmd = true;
+		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
+		if (r)
+			return r;
+	} else if (!ctx->has_msg_cmd) {
+		DRM_ERROR("Message needed before other commands are sent!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_uvd_cs_reg - parse register writes
+ *
+ * @ctx: UVD parser context
+ * @cb: callback function
+ *
+ * Parse the register writes, call cb on each complete command.
+ */
+static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
+			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
+{
+	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
+	int i, r;
+
+	ctx->idx++;
+	for (i = 0; i <= ctx->count; ++i) {
+		unsigned reg = ctx->reg + i;
+
+		if (ctx->idx >= ib->length_dw) {
+			DRM_ERROR("Register command after end of CS!\n");
+			return -EINVAL;
+		}
+
+		switch (reg) {
+		case mmUVD_GPCOM_VCPU_DATA0:
+			ctx->data0 = ctx->idx;
+			break;
+		case mmUVD_GPCOM_VCPU_DATA1:
+			ctx->data1 = ctx->idx;
+			break;
+		case mmUVD_GPCOM_VCPU_CMD:
+			r = cb(ctx);
+			if (r)
+				return r;
+			break;
+		case mmUVD_ENGINE_CNTL:
+			break;
+		default:
+			DRM_ERROR("Invalid reg 0x%X!\n", reg);
+			return -EINVAL;
+		}
+		ctx->idx++;
+	}
+	return 0;
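+/* returns true when the header version differs from the expected major/minor */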
+}
+
+/**
+ * amdgpu_uvd_cs_packets - parse UVD packets
+ *
+ * @ctx: UVD parser context
+ * @cb: callback function
+ *
+ * Parse the command stream packets.
+ */
+static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
+				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
+{
+	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
+	int r;
+
+	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
+		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
+		unsigned type = CP_PACKET_GET_TYPE(cmd);
+		switch (type) {
+		case PACKET_TYPE0:
+			ctx->reg = CP_PACKET0_GET_REG(cmd);
+			ctx->count = CP_PACKET_GET_COUNT(cmd);
+			r = amdgpu_uvd_cs_reg(ctx, cb);
+			if (r)
+				return r;
+			break;
+		case PACKET_TYPE2:
+			++ctx->idx;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", type);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_uvd_ring_parse_cs - UVD command submission parser
+ *
+ * @parser: Command submission parser context
+ *
+ * Parse the command stream, patch in addresses as necessary.
+ */
+int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
+{
+	struct amdgpu_uvd_cs_ctx ctx = {};
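+	/* minimum sizes per command index: 0 = msg, 1 = DPB, 2 = decode target, 3 = feedback */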
+	unsigned buf_sizes[] = {
+		[0x00000000]	=	2048,
+		[0x00000001]	=	32 * 1024 * 1024,
+		[0x00000002]	=	2048 * 1152 * 3,
+		[0x00000003]	=	2048,
+	};
+	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
+	int r;
+
+	if (ib->length_dw % 16) {
+		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+			  ib->length_dw);
+		return -EINVAL;
+	}
+
+	ctx.parser = parser;
+	ctx.buf_sizes = buf_sizes;
+	ctx.ib_idx = ib_idx;
+
+	/* first round, make sure the buffers are actually in the UVD segment */
+	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
+	if (r)
+		return r;
+
+	/* second round, patch buffer addresses into the command stream */
+	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
+	if (r)
+		return r;
+
+	if (!ctx.has_msg_cmd) {
+		DRM_ERROR("UVD-IBs need a msg command!\n");
+		return -EINVAL;
+	}
+
+	amdgpu_uvd_note_usage(ctx.parser->adev);
+
+	return 0;
+}
+
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct amdgpu_fence **fence)
+{
+	struct ttm_validate_buffer tv;
+	struct ww_acquire_ctx ticket;
+	struct list_head head;
+	struct amdgpu_ib ib;
+	uint64_t addr;
+	int i, r;
+
+	memset(&tv, 0, sizeof(tv));
+	tv.bo = &bo->tbo;
+
+	INIT_LIST_HEAD(&head);
+	list_add(&tv.head, &head);
+
+	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
+	if (r)
+		return r;
+
+	if (!bo->adev->uvd.address_64_bit) {
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+		amdgpu_uvd_force_into_uvd_segment(bo);
+	}
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	if (r)
+		goto err;
+
+	r = amdgpu_ib_get(ring, NULL, 64, &ib);
+	if (r)
+		goto err;
+
+	addr = amdgpu_bo_gpu_offset(bo);
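+	/* point DATA0/DATA1 at the msg BO, trigger the command and pad the IB with NOPs */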
+	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+	ib.ptr[1] = addr;
+	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+	ib.ptr[3] = addr >> 32;
+	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+	ib.ptr[5] = 0;
+	for (i = 6; i < 16; ++i)
+		ib.ptr[i] = PACKET2(0);
+	ib.length_dw = 16;
+
+	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	if (r)
+		goto err;
+	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+
+	if (fence)
+		*fence = amdgpu_fence_ref(ib.fence);
+
+	amdgpu_ib_free(ring->adev, &ib);
+	amdgpu_bo_unref(&bo);
+	return 0;
+
+err:
+	ttm_eu_backoff_reservation(&ticket, &head);
+	return r;
+}
+
+/* multiple fence commands without any stream commands in between can
+   crash the vcpu so just try to emit a dummy create/destroy msg to
+   avoid this */
+int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+			      struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_bo *bo;
+	uint32_t *msg;
+	int r, i;
+
+	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (r) {
+		amdgpu_bo_unref(&bo);
+		return r;
+	}
+
+	r = amdgpu_bo_kmap(bo, (void **)&msg);
+	if (r) {
+		amdgpu_bo_unreserve(bo);
+		amdgpu_bo_unref(&bo);
+		return r;
+	}
+
+	/* stitch together an UVD create msg */
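+	/* word 1: msg type (0 = create), word 2: session handle */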
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
+	for (i = 11; i < 1024; ++i)
+		msg[i] = cpu_to_le32(0x0);
+
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unreserve(bo);
+
+	return amdgpu_uvd_send_msg(ring, bo, fence);
+}
+
+int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+			       struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_bo *bo;
+	uint32_t *msg;
+	int r, i;
+
+	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (r) {
+		amdgpu_bo_unref(&bo);
+		return r;
+	}
+
+	r = amdgpu_bo_kmap(bo, (void **)&msg);
+	if (r) {
+		amdgpu_bo_unreserve(bo);
+		amdgpu_bo_unref(&bo);
+		return r;
+	}
+
+	/* stitch together an UVD destroy msg */
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	for (i = 4; i < 1024; ++i)
+		msg[i] = cpu_to_le32(0x0);
+
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unreserve(bo);
+
+	return amdgpu_uvd_send_msg(ring, bo, fence);
+}
+
+static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
+	unsigned i, fences, handles = 0;
+
+	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
+
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&adev->uvd.handles[i]))
+			++handles;
+
+	if (fences == 0 && handles == 0) {
+		if (adev->pm.dpm_enabled) {
+			amdgpu_dpm_enable_uvd(adev, false);
+		} else {
+			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+		}
+	} else {
+		schedule_delayed_work(&adev->uvd.idle_work,
+				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+	}
+}
+
+static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
+{
+	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
+					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+
+	if (set_clocks) {
+		if (adev->pm.dpm_enabled) {
+			amdgpu_dpm_enable_uvd(adev, true);
+		} else {
+			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+		}
+	}
+}

+ 39 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

@@ -0,0 +1,39 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_UVD_H__
+#define __AMDGPU_UVD_H__
+
+int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_suspend(struct amdgpu_device *adev);
+int amdgpu_uvd_resume(struct amdgpu_device *adev);
+int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+			      struct amdgpu_fence **fence);
+int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+			       struct amdgpu_fence **fence);
+void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
+			     struct drm_file *filp);
+int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+
+#endif

+ 724 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -0,0 +1,724 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_vce.h"
+#include "cikd.h"
+
+/* 1 second timeout */
+#define VCE_IDLE_TIMEOUT_MS	1000
+
+/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
+#define FIRMWARE_KABINI 	"radeon/kabini_vce.bin"
+#define FIRMWARE_KAVERI 	"radeon/kaveri_vce.bin"
+#define FIRMWARE_HAWAII 	"radeon/hawaii_vce.bin"
+#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
+#endif
+#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
+#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
+
+#ifdef CONFIG_DRM_AMDGPU_CIK
+MODULE_FIRMWARE(FIRMWARE_BONAIRE);
+MODULE_FIRMWARE(FIRMWARE_KABINI);
+MODULE_FIRMWARE(FIRMWARE_KAVERI);
+MODULE_FIRMWARE(FIRMWARE_HAWAII);
+MODULE_FIRMWARE(FIRMWARE_MULLINS);
+#endif
+MODULE_FIRMWARE(FIRMWARE_TONGA);
+MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+
+static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+
+/**
+ * amdgpu_vce_init - allocate memory, load vce firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * First step to get VCE online, allocate memory and load the firmware
+ */
+int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+{
+	const char *fw_name;
+	const struct common_firmware_header *hdr;
+	unsigned ucode_version, version_major, version_minor, binary_id;
+	int i, r;
+
+	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
+
+	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+		fw_name = FIRMWARE_BONAIRE;
+		break;
+	case CHIP_KAVERI:
+		fw_name = FIRMWARE_KAVERI;
+		break;
+	case CHIP_KABINI:
+		fw_name = FIRMWARE_KABINI;
+		break;
+	case CHIP_HAWAII:
+		fw_name = FIRMWARE_HAWAII;
+		break;
+	case CHIP_MULLINS:
+		fw_name = FIRMWARE_MULLINS;
+		break;
+#endif
+	case CHIP_TONGA:
+		fw_name = FIRMWARE_TONGA;
+		break;
+	case CHIP_CARRIZO:
+		fw_name = FIRMWARE_CARRIZO;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
+	if (r) {
+		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
+			fw_name);
+		return r;
+	}
+
+	r = amdgpu_ucode_validate(adev->vce.fw);
+	if (r) {
+		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
+			fw_name);
+		release_firmware(adev->vce.fw);
+		adev->vce.fw = NULL;
+		return r;
+	}
+
+	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+
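+	/* the VCE ucode version packs major in bits 31:20, minor in 19:8 and the binary id in 7:0 */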
+	ucode_version = le32_to_cpu(hdr->ucode_version);
+	version_major = (ucode_version >> 20) & 0xfff;
+	version_minor = (ucode_version >> 8) & 0xfff;
+	binary_id = ucode_version & 0xff;
+	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
+		version_major, version_minor, binary_id);
+	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
+				(binary_id << 8));
+
+	/* allocate firmware, stack and heap BO */
+
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
+	if (r) {
+		amdgpu_bo_unref(&adev->vce.vcpu_bo);
+		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
+			  &adev->vce.gpu_addr);
+	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
+	if (r) {
+		amdgpu_bo_unref(&adev->vce.vcpu_bo);
+		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
+		return r;
+	}
+
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+		atomic_set(&adev->vce.handles[i], 0);
+		adev->vce.filp[i] = NULL;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_vce_sw_fini - free memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Last step on VCE teardown, free firmware memory
+ */
+int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
+{
+	if (adev->vce.vcpu_bo == NULL)
+		return 0;
+
+	amdgpu_bo_unref(&adev->vce.vcpu_bo);
+
+	amdgpu_ring_fini(&adev->vce.ring[0]);
+	amdgpu_ring_fini(&adev->vce.ring[1]);
+
+	release_firmware(adev->vce.fw);
+
+	return 0;
+}
+
+/**
+ * amdgpu_vce_suspend - unpin VCE fw memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_suspend(struct amdgpu_device *adev)
+{
+	int i;
+
+	if (adev->vce.vcpu_bo == NULL)
+		return 0;
+
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+		if (atomic_read(&adev->vce.handles[i]))
+			break;
+
+	if (i == AMDGPU_MAX_VCE_HANDLES)
+		return 0;
+
+	/* TODO: suspending running encoding sessions isn't supported */
+	return -EINVAL;
+}
+
+/**
+ * amdgpu_vce_resume - pin VCE fw memory
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_resume(struct amdgpu_device *adev)
+{
+	void *cpu_addr;
+	const struct common_firmware_header *hdr;
+	unsigned offset;
+	int r;
+
+	if (adev->vce.vcpu_bo == NULL)
+		return -EINVAL;
+
+	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
+		return r;
+	}
+
+	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
+		dev_err(adev->dev, "(%d) VCE map failed\n", r);
+		return r;
+	}
+
+	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
+		(adev->vce.fw->size) - offset);
+
+	amdgpu_bo_kunmap(adev->vce.vcpu_bo);
+
+	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
+
+	return 0;
+}
+
+/**
+ * amdgpu_vce_idle_work_handler - power off VCE
+ *
+ * @work: pointer to work structure
+ *
+ * Power off VCE when it is not used anymore
+ */
+static void amdgpu_vce_idle_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, vce.idle_work.work);
+
+	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
+	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
+		if (adev->pm.dpm_enabled) {
+			amdgpu_dpm_enable_vce(adev, false);
+		} else {
+			amdgpu_asic_set_vce_clocks(adev, 0, 0);
+		}
+	} else {
+		schedule_delayed_work(&adev->vce.idle_work,
+				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+	}
+}
+
+/**
+ * amdgpu_vce_note_usage - power up VCE
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Make sure VCE is powered up when we want to use it
+ */
+static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
+{
+	bool streams_changed = false;
+	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
+	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
+					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
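+	/*
+	 * cancel_delayed_work_sync() returns false when no idle work was
+	 * pending, i.e. the idle handler may already have powered VCE down,
+	 * so set_clocks requests that the clocks/power gating be re-enabled
+	 * below.
+	 */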
+
+	if (adev->pm.dpm_enabled) {
+		/* XXX figure out if the streams changed */
+		streams_changed = false;
+	}
+
+	if (set_clocks || streams_changed) {
+		if (adev->pm.dpm_enabled) {
+			amdgpu_dpm_enable_vce(adev, true);
+		} else {
+			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+		}
+	}
+}
+
+/**
+ * amdgpu_vce_free_handles - free still open VCE handles
+ *
+ * @adev: amdgpu_device pointer
+ * @filp: drm file pointer
+ *
+ * Close all VCE handles still open by this file pointer
+ */
+void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
+{
+	struct amdgpu_ring *ring = &adev->vce.ring[0];
+	int i, r;
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->vce.handles[i]);
+		if (!handle || adev->vce.filp[i] != filp)
+			continue;
+
+		amdgpu_vce_note_usage(adev);
+
+		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
+		if (r)
+			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
+
+		adev->vce.filp[i] = NULL;
+		atomic_set(&adev->vce.handles[i], 0);
+	}
+}
+
+/**
+ * amdgpu_vce_get_create_msg - generate a VCE create msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: VCE session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+			      struct amdgpu_fence **fence)
+{
+	const unsigned ib_size_dw = 1024;
+	struct amdgpu_ib ib;
+	uint64_t dummy;
+	int i, r;
+
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	dummy = ib.gpu_addr + 1024;
+
+	/* stitch together a VCE create msg */
+	ib.length_dw = 0;
+	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
+	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
+	ib.ptr[ib.length_dw++] = handle;
+
+	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
+	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
+	ib.ptr[ib.length_dw++] = 0x00000000;
+	ib.ptr[ib.length_dw++] = 0x00000042;
+	ib.ptr[ib.length_dw++] = 0x0000000a;
+	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib.ptr[ib.length_dw++] = 0x00000080;
+	ib.ptr[ib.length_dw++] = 0x00000060;
+	ib.ptr[ib.length_dw++] = 0x00000100;
+	ib.ptr[ib.length_dw++] = 0x00000100;
+	ib.ptr[ib.length_dw++] = 0x0000000c;
+	ib.ptr[ib.length_dw++] = 0x00000000;
+
+	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
+	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
+	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
+	ib.ptr[ib.length_dw++] = dummy;
+	ib.ptr[ib.length_dw++] = 0x00000001;
+
+	for (i = ib.length_dw; i < ib_size_dw; ++i)
+		ib.ptr[i] = 0x0;
+
+	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+	}
+
+	if (fence)
+		*fence = amdgpu_fence_ref(ib.fence);
+
+	amdgpu_ib_free(ring->adev, &ib);
+
+	return r;
+}
+
+/**
+ * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
+ *
+ * @ring: ring we should submit the msg to
+ * @handle: VCE session handle to use
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+			       struct amdgpu_fence **fence)
+{
+	const unsigned ib_size_dw = 1024;
+	struct amdgpu_ib ib;
+	uint64_t dummy;
+	int i, r;
+
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	dummy = ib.gpu_addr + 1024;
+
+	/* stitch together a VCE destroy msg */
+	ib.length_dw = 0;
+	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
+	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
+	ib.ptr[ib.length_dw++] = handle;
+
+	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
+	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
+	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
+	ib.ptr[ib.length_dw++] = dummy;
+	ib.ptr[ib.length_dw++] = 0x00000001;
+
+	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
+	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
+
+	for (i = ib.length_dw; i < ib_size_dw; ++i)
+		ib.ptr[i] = 0x0;
+
+	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+	}
+
+	if (fence)
+		*fence = amdgpu_fence_ref(ib.fence);
+
+	amdgpu_ib_free(ring->adev, &ib);
+
+	return r;
+}
+
+/**
+ * amdgpu_vce_cs_reloc - command submission relocation
+ *
+ * @p: parser context
+ * @ib_idx: index of the IB the relocation is in
+ * @lo: index of the lower 32 bits in the IB
+ * @hi: index of the upper 32 bits in the IB
+ *
+ * Patch relocation inside command stream with real buffer address
+ */
+int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_ib *ib = &p->ibs[ib_idx];
+	struct amdgpu_bo *bo;
+	uint64_t addr;
+
+	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
+	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+
+	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
+	if (mapping == NULL) {
+		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
+			  addr, lo, hi);
+		return -EINVAL;
+	}
+
+	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+	addr += amdgpu_bo_gpu_offset(bo);
+
+	ib->ptr[lo] = addr & 0xFFFFFFFF;
+	ib->ptr[hi] = addr >> 32;
+
+	return 0;
+}
+
+/**
+ * amdgpu_vce_ring_parse_cs - parse and validate the command stream
+ *
+ * @p: parser context
+ * @ib_idx: index of the IB to parse
+ *
+ */
+int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+{
+	uint32_t handle = 0;
+	bool destroy = false;
+	int i, r, idx = 0;
+	struct amdgpu_ib *ib = &p->ibs[ib_idx];
+
+	amdgpu_vce_note_usage(p->adev);
+
+	while (idx < ib->length_dw) {
+		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
+		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+
+		if ((len < 8) || (len & 3)) {
+			DRM_ERROR("invalid VCE command length (%d)!\n", len);
+			return -EINVAL;
+		}
+
+		switch (cmd) {
+		case 0x00000001: // session
+			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+			break;
+
+		case 0x00000002: // task info
+		case 0x01000001: // create
+		case 0x04000001: // config extension
+		case 0x04000002: // pic control
+		case 0x04000005: // rate control
+		case 0x04000007: // motion estimation
+		case 0x04000008: // rdo
+		case 0x04000009: // vui
+		case 0x05000002: // auxiliary buffer
+			break;
+
+		case 0x03000001: // encode
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
+			if (r)
+				return r;
+
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
+			if (r)
+				return r;
+			break;
+
+		case 0x02000001: // destroy
+			destroy = true;
+			break;
+
+		case 0x05000001: // context buffer
+		case 0x05000004: // video bitstream buffer
+		case 0x05000005: // feedback buffer
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
+			if (r)
+				return r;
+			break;
+
+		default:
+			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
+			return -EINVAL;
+		}
+
+		idx += len / 4;
+	}
+
+	if (destroy) {
+		/* IB contains a destroy msg, free the handle */
+		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+
+		return 0;
+	}
+
+	/* create or encode, validate the handle */
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+		if (atomic_read(&p->adev->vce.handles[i]) == handle)
+			return 0;
+	}
+
+	/* handle not found, try to alloc a new one */
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
+			p->adev->vce.filp[i] = p->filp;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("No more free VCE handles!\n");
+
+	return -EINVAL;
+}
+
+/**
+ * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
+ *
+ * @ring: engine to use
+ * @semaphore: address of semaphore
+ * @emit_wait: true=emit wait, false=emit signal
+ *
+ */
+bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
+				    struct amdgpu_semaphore *semaphore,
+				    bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
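+	/*
+	 * The semaphore address is emitted as two 20-bit fields; the low
+	 * three bits are dropped, so the semaphore must be 8-byte aligned
+	 * and the two fields together cover a 43-bit GPU address.
+	 */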
+
+	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
+	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
+	if (!emit_wait)
+		amdgpu_ring_write(ring, VCE_CMD_END);
+
+	return true;
+}
+
+/**
+ * amdgpu_vce_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: engine to use
+ * @ib: the IB to execute
+ *
+ */
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+	amdgpu_ring_write(ring, VCE_CMD_IB);
+	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+	amdgpu_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * amdgpu_vce_ring_emit_fence - add a fence command to the ring
+ *
+ * @ring: engine to use
+ * @addr: address to write the fence value to
+ * @seq: sequence number to write
+ * @write64bits: write the fence as a 64-bit value (not supported)
+ *
+ */
+void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				bool write64bits)
+{
+	WARN_ON(write64bits);
+
+	amdgpu_ring_write(ring, VCE_CMD_FENCE);
+	amdgpu_ring_write(ring, addr);
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, VCE_CMD_TRAP);
+	amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+/**
+ * amdgpu_vce_ring_test_ring - test if VCE ring is working
+ *
+ * @ring: the engine to test on
+ *
+ */
+int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	unsigned i;
+	int r;
+
+	r = amdgpu_ring_lock(ring, 16);
+	if (r) {
+		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
+			  ring->idx, r);
+		return r;
+	}
+	amdgpu_ring_write(ring, VCE_CMD_END);
+	amdgpu_ring_unlock_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (amdgpu_ring_get_rptr(ring) != rptr)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+			 ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed\n",
+			  ring->idx);
+		r = -ETIMEDOUT;
+	}
+
+	return r;
+}
+
+/**
+ * amdgpu_vce_ring_test_ib - test if VCE IBs are working
+ *
+ * @ring: the engine to test on
+ *
+ */
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
+{
+	struct amdgpu_fence *fence = NULL;
+	int r;
+
+	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+		goto error;
+	}
+
+	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
+	if (r) {
+		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+		goto error;
+	}
+
+	r = amdgpu_fence_wait(fence, false);
+	if (r) {
+		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+	} else {
+		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+	}
+error:
+	amdgpu_fence_unref(&fence);
+	return r;
+}

+ 47 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h

@@ -0,0 +1,47 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_VCE_H__
+#define __AMDGPU_VCE_H__
+
+int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
+int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_suspend(struct amdgpu_device *adev);
+int amdgpu_vce_resume(struct amdgpu_device *adev);
+int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+			      struct amdgpu_fence **fence);
+int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+			       struct amdgpu_fence **fence);
+void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
+int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi);
+int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
+				    struct amdgpu_semaphore *semaphore,
+				    bool emit_wait);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				bool write64bit);
+int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
+
+#endif

+ 1265 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -0,0 +1,1265 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time.  The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID.  When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer.  VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
+
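+/*
+ * Illustrative example (hypothetical numbers): with a 4 GB address space
+ * (max_pfn = 1 << 20 GPU pages) and amdgpu_vm_block_size = 9,
+ * amdgpu_vm_num_pdes() below returns 1 << 11 = 2048 page directory
+ * entries and amdgpu_vm_directory_size() returns 2048 * 8 = 16 KB.
+ */
+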
+/**
+ * amdgpu_vm_num_pdes - return the number of page directory entries
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
+{
+	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
+}
+
+/**
+ * amdgpu_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
+{
+	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
+}
+
+/**
+ * amdgpu_vm_get_bos - add the vm BOs to a validation list
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: vm providing the BOs
+ * @head: head of validation list
+ *
+ * Add the page directory to the list of BOs to
+ * validate for command submission (cayman+).
+ */
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					  struct amdgpu_vm *vm,
+					  struct list_head *head)
+{
+	struct amdgpu_bo_list_entry *list;
+	unsigned i, idx;
+
+	mutex_lock(&vm->mutex);
+	list = drm_malloc_ab(vm->max_pde_used + 2,
+			     sizeof(struct amdgpu_bo_list_entry));
+	if (!list) {
+		mutex_unlock(&vm->mutex);
+		return NULL;
+	}
+
+	/* add the vm page table to the list */
+	list[0].robj = vm->page_directory;
+	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
+	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
+	list[0].priority = 0;
+	list[0].tv.bo = &vm->page_directory->tbo;
+	list[0].tv.shared = true;
+	list_add(&list[0].tv.head, head);
+
+	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
+		if (!vm->page_tables[i].bo)
+			continue;
+
+		list[idx].robj = vm->page_tables[i].bo;
+		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
+		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
+		list[idx].priority = 0;
+		list[idx].tv.bo = &list[idx].robj->tbo;
+		list[idx].tv.shared = true;
+		list_add(&list[idx++].tv.head, head);
+	}
+	mutex_unlock(&vm->mutex);
+
+	return list;
+}
+
+/**
+ * amdgpu_vm_grab_id - allocate the next free VMID
+ *
+ * @ring: ring we want to submit job to
+ * @vm: vm to allocate id for
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
+				       struct amdgpu_vm *vm)
+{
+	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
+	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	struct amdgpu_device *adev = ring->adev;
+
+	unsigned choices[2] = {};
+	unsigned i;
+
+	/* check if the id is still valid */
+	if (vm_id->id && vm_id->last_id_use &&
+	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
+		return NULL;
+
+	/* we definitely need to flush */
+	vm_id->pd_gpu_addr = ~0ll;
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < adev->vm_manager.nvm; ++i) {
+		struct amdgpu_fence *fence = adev->vm_manager.active[i];
+
+		if (fence == NULL) {
+			/* found a free one */
+			vm_id->id = i;
+			trace_amdgpu_vm_grab_id(i, ring->idx);
+			return NULL;
+		}
+
+		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
+			best[fence->ring->idx] = fence;
+			choices[fence->ring == ring ? 0 : 1] = i;
+		}
+	}
+
+	for (i = 0; i < 2; ++i) {
+		if (choices[i]) {
+			vm_id->id = choices[i];
+			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
+			return adev->vm_manager.active[choices[i]];
+		}
+	}
+
+	/* should never happen */
+	BUG();
+	return NULL;
+}
+
+/**
+ * amdgpu_vm_flush - hardware flush the vm
+ *
+ * @ring: ring to use for flush
+ * @vm: vm we want to flush
+ * @updates: last vm update that we waited for
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct amdgpu_fence *updates)
+{
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+
+	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
+	    amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
+
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
+		amdgpu_fence_unref(&vm_id->flushed_updates);
+		vm_id->flushed_updates = amdgpu_fence_ref(updates);
+		vm_id->pd_gpu_addr = pd_addr;
+		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+	}
+}
+
+/**
+ * amdgpu_vm_fence - remember fence for vm
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct amdgpu_fence *fence)
+{
+	unsigned ridx = fence->ring->idx;
+	unsigned vm_id = vm->ids[ridx].id;
+
+	amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
+	adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);
+
+	amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
+	vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
+}
+
+/**
+ * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bo's vm list for the requested vm
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo)
+{
+	struct amdgpu_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			return bo_va;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * amdgpu_vm_update_pages - helper to call the right asic function
+ *
+ * @adev: amdgpu_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ * @gtt_flags: GTT hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
+				   struct amdgpu_ib *ib,
+				   uint64_t pe, uint64_t addr,
+				   unsigned count, uint32_t incr,
+				   uint32_t flags, uint32_t gtt_flags)
+{
+	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
+
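+	/*
+	 * Path selection (illustrative summary): system pages whose flags
+	 * match the GTT flags are copied straight from the GART table;
+	 * other system pages, or runs of fewer than three entries, are
+	 * written directly; longer contiguous runs use the set_pte_pde path.
+	 */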
+	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
+		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
+		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+
+	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
+		amdgpu_vm_write_pte(adev, ib, pe, addr,
+				      count, incr, flags);
+
+	} else {
+		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+				      count, incr, flags);
+	}
+}
+
+/**
+ * amdgpu_vm_clear_bo - initially clear the page dir/table
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: bo to clear
+ */
+static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+			      struct amdgpu_bo *bo)
+{
+	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+	struct amdgpu_ib ib;
+	unsigned entries;
+	uint64_t addr;
+	int r;
+
+	r = amdgpu_bo_reserve(bo, false);
+	if (r)
+		return r;
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto error_unreserve;
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	if (r)
+		goto error_unreserve;
+
+	addr = amdgpu_bo_gpu_offset(bo);
+	entries = amdgpu_bo_size(bo) / 8;
+
+	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
+	if (r)
+		goto error_unreserve;
+
+	ib.length_dw = 0;
+
+	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
+	amdgpu_vm_pad_ib(adev, &ib);
+	WARN_ON(ib.length_dw > 64);
+
+	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	if (r)
+		goto error_free;
+
+	amdgpu_bo_fence(bo, ib.fence, true);
+
+error_free:
+	amdgpu_ib_free(adev, &ib);
+
+error_unreserve:
+	amdgpu_bo_unreserve(bo);
+	return r;
+}
+
+/**
+ * amdgpu_vm_map_gart - get the physical address of a gart page
+ *
+ * @adev: amdgpu_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
+{
+	uint64_t result;
+
+	/* page table offset */
+	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+	/* in case cpu page size != gpu page size */
+	result |= addr & (~PAGE_MASK);
+
+	return result;
+}
+
+/**
+ * amdgpu_vm_update_page_directory - make sure that the page directory is valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm)
+{
+	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+	struct amdgpu_bo *pd = vm->page_directory;
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0, pt_idx, ndw;
+	struct amdgpu_ib ib;
+	int r;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	/* assume the worst case */
+	ndw += vm->max_pde_used * 6;
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
+		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
+		uint64_t pde, pt;
+
+		if (bo == NULL)
+			continue;
+
+		pt = amdgpu_bo_gpu_offset(bo);
+		if (vm->page_tables[pt_idx].addr == pt)
+			continue;
+		vm->page_tables[pt_idx].addr = pt;
+
+		pde = pd_addr + pt_idx * 8;
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				amdgpu_vm_update_pages(adev, &ib, last_pde,
+						       last_pt, count, incr,
+						       AMDGPU_PTE_VALID, 0);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count)
+		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+				       incr, AMDGPU_PTE_VALID, 0);
+
+	if (ib.length_dw != 0) {
+		amdgpu_vm_pad_ib(adev, &ib);
+		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		WARN_ON(ib.length_dw > ndw);
+		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+		if (r) {
+			amdgpu_ib_free(adev, &ib);
+			return r;
+		}
+		amdgpu_bo_fence(pd, ib.fence, true);
+	}
+	amdgpu_ib_free(adev, &ib);
+
+	return 0;
+}
+
+/**
+ * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @adev: amdgpu_device pointer
+ * @ib: IB for the update
+ * @pe_start: first PTE to handle
+ * @pe_end: last PTE to handle
+ * @addr: addr those PTEs should point to
+ * @flags: hw mapping flags
+ * @gtt_flags: GTT hw mapping flags
+ *
+ * Global and local mutex must be locked!
+ */
+static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
+				struct amdgpu_ib *ib,
+				uint64_t pe_start, uint64_t pe_end,
+				uint64_t addr, uint32_t flags,
+				uint32_t gtt_flags)
+{
+	/**
+	 * The MC L1 TLB supports variable sized pages, based on a fragment
+	 * field in the PTE. When this field is set to a non-zero value, page
+	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+	 * flags are considered valid for all PTEs within the fragment range
+	 * and corresponding mappings are assumed to be physically contiguous.
+	 *
+	 * The L1 TLB can store a single PTE for the whole fragment,
+	 * significantly increasing the space available for translation
+	 * caching. This leads to large improvements in throughput when the
+	 * TLB is under pressure.
+	 *
+	 * The L2 TLB distributes small and large fragments into two
+	 * asymmetric partitions. The large fragment cache is significantly
+	 * larger. Thus, we try to use large fragments wherever possible.
+	 * Userspace can support this by aligning virtual base address and
+	 * allocation size to the fragment size.
+	 */
+
+	/* SI and newer are optimized for 64KB */
+	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
+	uint64_t frag_align = 0x80;
+
+	uint64_t frag_start = ALIGN(pe_start, frag_align);
+	uint64_t frag_end = pe_end & ~(frag_align - 1);
+
+	unsigned count;
+
+	/* system pages are not contiguous */
+	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
+	    (frag_start >= frag_end)) {
+
+		count = (pe_end - pe_start) / 8;
+		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
+				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+		return;
+	}
+
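+	/*
+	 * Illustrative example (hypothetical addresses): with pe_start = 0x48,
+	 * pe_end = 0x208 and frag_align = 0x80, frag_start = 0x80 and
+	 * frag_end = 0x200; the head (0x48-0x80) and tail (0x200-0x208) are
+	 * written as normal PTEs while the middle (0x80-0x200) gets the
+	 * 64KB fragment flag.
+	 */
+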
+	/* handle the 4K area at the beginning */
+	if (pe_start != frag_start) {
+		count = (frag_start - pe_start) / 8;
+		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
+				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+		addr += AMDGPU_GPU_PAGE_SIZE * count;
+	}
+
+	/* handle the area in the middle */
+	count = (frag_end - frag_start) / 8;
+	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
+			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
+			       gtt_flags);
+
+	/* handle the 4K area at the end */
+	if (frag_end != pe_end) {
+		addr += AMDGPU_GPU_PAGE_SIZE * count;
+		count = (pe_end - frag_end) / 8;
+		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
+				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+	}
+}
+
+/**
+ * amdgpu_vm_update_ptes - make sure that page tables are valid
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to fill with the update commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ * @gtt_flags: GTT hw mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
+				 struct amdgpu_vm *vm,
+				 struct amdgpu_ib *ib,
+				 uint64_t start, uint64_t end,
+				 uint64_t dst, uint32_t flags,
+				 uint32_t gtt_flags)
+{
+	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
+		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
+		unsigned nptes;
+		uint64_t pte;
+		int r;
+
+		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
+				 AMDGPU_FENCE_OWNER_VM);
+		r = reservation_object_reserve_shared(pt->tbo.resv);
+		if (r)
+			return r;
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+		pte = amdgpu_bo_gpu_offset(pt);
+		pte += (addr & mask) * 8;
+
+		if ((last_pte + 8 * count) != pte) {
+
+			if (count) {
+				amdgpu_vm_frag_ptes(adev, ib, last_pte,
+						    last_pte + 8 * count,
+						    last_dst, flags,
+						    gtt_flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		amdgpu_vm_frag_ptes(adev, ib, last_pte,
+				    last_pte + 8 * count,
+				    last_dst, flags, gtt_flags);
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_vm_fence_pts - fence page tables after an update
+ *
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @fence: fence to use
+ *
+ * Fence the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
+				uint64_t start, uint64_t end,
+				struct amdgpu_fence *fence)
+{
+	unsigned i;
+
+	start >>= amdgpu_vm_block_size;
+	end >>= amdgpu_vm_block_size;
+
+	for (i = start; i <= end; ++i)
+		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
+}
+
+/**
+ * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @mapping: mapped range and flags to use for the update
+ * @addr: addr to set the area to
+ * @gtt_flags: flags as they are used for GTT
+ * @fence: optional resulting fence
+ *
+ * Fill in the page table entries for @mapping.
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Objects have to be reserved and the mutex must be locked!
+ */
+static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+				       struct amdgpu_vm *vm,
+				       struct amdgpu_bo_va_mapping *mapping,
+				       uint64_t addr, uint32_t gtt_flags,
+				       struct amdgpu_fence **fence)
+{
+	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+	unsigned nptes, ncmds, ndw;
+	uint32_t flags = gtt_flags;
+	struct amdgpu_ib ib;
+	int r;
+
+	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
+	 * but filter the flags here just in case
+	 */
+	if (!(mapping->flags & AMDGPU_PTE_READABLE))
+		flags &= ~AMDGPU_PTE_READABLE;
+	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+		flags &= ~AMDGPU_PTE_WRITEABLE;
+
+	trace_amdgpu_vm_bo_update(mapping);
+
+	nptes = mapping->it.last - mapping->it.start + 1;
+
+	/*
+	 * reserve space for one command every (1 << BLOCK_SIZE)
+	 *  entries or 2k dwords (whatever is smaller)
+	 */
+	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
+		/* only copy commands needed */
+		ndw += ncmds * 7;
+
+	} else if (flags & AMDGPU_PTE_SYSTEM) {
+		/* header for write data commands */
+		ndw += ncmds * 4;
+
+		/* body of write data command */
+		ndw += nptes * 2;
+
+	} else {
+		/* set page commands needed */
+		ndw += ncmds * 10;
+
+		/* two extra commands for begin/end of fragment */
+		ndw += 2 * 10;
+	}
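+	/*
+	 * Illustrative example (hypothetical sizes): with amdgpu_vm_block_size
+	 * set to 9 and nptes = 4096, ncmds is (4096 >> 9) + 1 = 9, so this
+	 * set-page path needs ndw = 64 + 9 * 10 + 2 * 10 = 174 dwords.
+	 */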
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
+	if (!(flags & AMDGPU_PTE_VALID)) {
+		unsigned i;
+
+		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+			struct amdgpu_fence *f = vm->ids[i].last_id_use;
+			amdgpu_sync_fence(&ib.sync, f);
+		}
+	}
+
+	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+				  mapping->it.last + 1, addr + mapping->offset,
+				  flags, gtt_flags);
+
+	if (r) {
+		amdgpu_ib_free(adev, &ib);
+		return r;
+	}
+
+	amdgpu_vm_pad_ib(adev, &ib);
+	WARN_ON(ib.length_dw > ndw);
+
+	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	if (r) {
+		amdgpu_ib_free(adev, &ib);
+		return r;
+	}
+	amdgpu_vm_fence_pts(vm, mapping->it.start,
+			    mapping->it.last + 1, ib.fence);
+	if (fence) {
+		amdgpu_fence_unref(fence);
+		*fence = amdgpu_fence_ref(ib.fence);
+	}
+	amdgpu_ib_free(adev, &ib);
+
+	return 0;
+}
+
+/**
+ * amdgpu_vm_bo_update - update all BO mappings in the vm page table
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: requested BO and VM object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo_va.
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Objects have to be reserved and the mutex must be locked!
+ */
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem)
+{
+	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo_va_mapping *mapping;
+	uint32_t flags;
+	uint64_t addr;
+	int r;
+
+	if (mem) {
+		addr = mem->start << PAGE_SHIFT;
+		if (mem->mem_type != TTM_PL_TT)
+			addr += adev->vm_manager.vram_base_offset;
+	} else {
+		addr = 0;
+	}
+
+	if (addr == bo_va->addr)
+		return 0;
+
+	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
+
+	list_for_each_entry(mapping, &bo_va->mappings, list) {
+		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
+						flags, &bo_va->last_pt_update);
+		if (r)
+			return r;
+	}
+
+	bo_va->addr = addr;
+	spin_lock(&vm->status_lock);
+	list_del_init(&bo_va->vm_status);
+	spin_unlock(&vm->status_lock);
+
+	return 0;
+}
+
+/**
+ * amdgpu_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	int r;
+
+	while (!list_empty(&vm->freed)) {
+		mapping = list_first_entry(&vm->freed,
+			struct amdgpu_bo_va_mapping, list);
+		list_del(&mapping->list);
+
+		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
+		kfree(mapping);
+		if (r)
+			return r;
+
+	}
+	return 0;
+
+}
+
+/**
+ * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all invalidated BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
+			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+{
+	struct amdgpu_bo_va *bo_va = NULL;
+	int r;
+
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->invalidated)) {
+		bo_va = list_first_entry(&vm->invalidated,
+			struct amdgpu_bo_va, vm_status);
+		spin_unlock(&vm->status_lock);
+
+		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		if (r)
+			return r;
+
+		spin_lock(&vm->status_lock);
+	}
+	spin_unlock(&vm->status_lock);
+
+	if (bo_va)
+		amdgpu_sync_fence(sync, bo_va->last_pt_update);
+	return 0;
+}
+
+/**
+ * amdgpu_vm_bo_add - add a bo to a specific vm
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @bo: amdgpu buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo)
+{
+	struct amdgpu_bo_va *bo_va;
+
+	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
+	if (bo_va == NULL) {
+		return NULL;
+	}
+	bo_va->vm = vm;
+	bo_va->bo = bo;
+	bo_va->addr = 0;
+	bo_va->ref_count = 1;
+	INIT_LIST_HEAD(&bo_va->bo_list);
+	INIT_LIST_HEAD(&bo_va->mappings);
+	INIT_LIST_HEAD(&bo_va->vm_status);
+
+	mutex_lock(&vm->mutex);
+	list_add_tail(&bo_va->bo_list, &bo->va);
+	mutex_unlock(&vm->mutex);
+
+	return bo_va;
+}
+
+/**
+ * amdgpu_vm_bo_map - map bo inside a vm
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @saddr: where to map the BO
+ * @offset: requested offset in the BO
+ * @size: size of the mapping in bytes
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add a mapping of the BO at the specified addr into the VM.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved and gets unreserved by this function!
+ */
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t saddr, uint64_t offset,
+		     uint64_t size, uint32_t flags)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_vm *vm = bo_va->vm;
+	struct interval_tree_node *it;
+	unsigned last_pfn, pt_idx;
+	uint64_t eaddr;
+	int r;
+
+	/* validate the parameters */
+	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
+		amdgpu_bo_unreserve(bo_va->bo);
+		return -EINVAL;
+	}
+
+	/* make sure the object fits at this offset */
+	eaddr = saddr + size;
+	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
+		amdgpu_bo_unreserve(bo_va->bo);
+		return -EINVAL;
+	}
+
+	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
+	if (last_pfn > adev->vm_manager.max_pfn) {
+		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
+			last_pfn, adev->vm_manager.max_pfn);
+		amdgpu_bo_unreserve(bo_va->bo);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vm->mutex);
+
+	saddr /= AMDGPU_GPU_PAGE_SIZE;
+	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+
+	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+	if (it) {
+		struct amdgpu_bo_va_mapping *tmp;
+		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
+		/* bo and tmp overlap, invalid addr */
+		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
+			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
+			tmp->it.start, tmp->it.last + 1);
+		amdgpu_bo_unreserve(bo_va->bo);
+		r = -EINVAL;
+		goto error_unlock;
+	}
+
+	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping) {
+		amdgpu_bo_unreserve(bo_va->bo);
+		r = -ENOMEM;
+		goto error_unlock;
+	}
+
+	INIT_LIST_HEAD(&mapping->list);
+	mapping->it.start = saddr;
+	mapping->it.last = eaddr - 1;
+	mapping->offset = offset;
+	mapping->flags = flags;
+
+	list_add(&mapping->list, &bo_va->mappings);
+	interval_tree_insert(&mapping->it, &vm->va);
+
+	/* Make sure the page tables are allocated */
+	saddr >>= amdgpu_vm_block_size;
+	eaddr >>= amdgpu_vm_block_size;
+
+	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
+
+	if (eaddr > vm->max_pde_used)
+		vm->max_pde_used = eaddr;
+
+	amdgpu_bo_unreserve(bo_va->bo);
+
+	/* walk over the address space and allocate the page tables */
+	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+		struct amdgpu_bo *pt;
+
+		if (vm->page_tables[pt_idx].bo)
+			continue;
+
+		/* drop mutex to allocate and clear page table */
+		mutex_unlock(&vm->mutex);
+
+		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
+				     AMDGPU_GPU_PAGE_SIZE, true,
+				     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+		if (r)
+			goto error_free;
+
+		r = amdgpu_vm_clear_bo(adev, pt);
+		if (r) {
+			amdgpu_bo_unref(&pt);
+			goto error_free;
+		}
+
+		/* acquire mutex again */
+		mutex_lock(&vm->mutex);
+		if (vm->page_tables[pt_idx].bo) {
+			/* someone else allocated the pt in the meantime */
+			mutex_unlock(&vm->mutex);
+			amdgpu_bo_unref(&pt);
+			mutex_lock(&vm->mutex);
+			continue;
+		}
+
+		vm->page_tables[pt_idx].addr = 0;
+		vm->page_tables[pt_idx].bo = pt;
+	}
+
+	mutex_unlock(&vm->mutex);
+	return 0;
+
+error_free:
+	mutex_lock(&vm->mutex);
+	list_del(&mapping->list);
+	interval_tree_remove(&mapping->it, &vm->va);
+	kfree(mapping);
+
+error_unlock:
+	mutex_unlock(&vm->mutex);
+	return r;
+}
+
+/**
+ * amdgpu_vm_bo_unmap - remove bo mapping from vm
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to remove the address from
+ * @saddr: where the BO is mapped
+ *
+ * Remove a mapping of the BO at the specified addr from the VM.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved and gets unreserved by this function!
+ */
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t saddr)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_vm *vm = bo_va->vm;
+
+	list_for_each_entry(mapping, &bo_va->mappings, list) {
+		if (mapping->it.start == saddr)
+			break;
+	}
+
+	if (&mapping->list == &bo_va->mappings) {
+		amdgpu_bo_unreserve(bo_va->bo);
+		return -ENOENT;
+	}
+
+	mutex_lock(&vm->mutex);
+	list_del(&mapping->list);
+	interval_tree_remove(&mapping->it, &vm->va);
+
+	if (bo_va->addr) {
+		/* clear the old address */
+		list_add(&mapping->list, &vm->freed);
+	} else {
+		kfree(mapping);
+	}
+	mutex_unlock(&vm->mutex);
+	amdgpu_bo_unreserve(bo_va->bo);
+
+	return 0;
+}
+
+/**
+ * amdgpu_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ *
+ * Object has to be reserved!
+ */
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va)
+{
+	struct amdgpu_bo_va_mapping *mapping, *next;
+	struct amdgpu_vm *vm = bo_va->vm;
+
+	list_del(&bo_va->bo_list);
+
+	mutex_lock(&vm->mutex);
+
+	spin_lock(&vm->status_lock);
+	list_del(&bo_va->vm_status);
+	spin_unlock(&vm->status_lock);
+
+	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+		list_del(&mapping->list);
+		interval_tree_remove(&mapping->it, &vm->va);
+		if (bo_va->addr)
+			list_add(&mapping->list, &vm->freed);
+		else
+			kfree(mapping);
+	}
+	amdgpu_fence_unref(&bo_va->last_pt_update);
+	kfree(bo_va);
+
+	mutex_unlock(&vm->mutex);
+}
+
+/**
+ * amdgpu_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo)
+{
+	struct amdgpu_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		if (bo_va->addr) {
+			spin_lock(&bo_va->vm->status_lock);
+			list_del(&bo_va->vm_status);
+			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
+			spin_unlock(&bo_va->vm->status_lock);
+		}
+	}
+}
+
+/**
+ * amdgpu_vm_init - initialize a vm instance
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
+		AMDGPU_VM_PTE_COUNT * 8);
+	unsigned pd_size, pd_entries, pts_size;
+	int i, r;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		vm->ids[i].id = 0;
+		vm->ids[i].flushed_updates = NULL;
+		vm->ids[i].last_id_use = NULL;
+	}
+	mutex_init(&vm->mutex);
+	vm->va = RB_ROOT;
+	spin_lock_init(&vm->status_lock);
+	INIT_LIST_HEAD(&vm->invalidated);
+	INIT_LIST_HEAD(&vm->freed);
+
+	pd_size = amdgpu_vm_directory_size(adev);
+	pd_entries = amdgpu_vm_num_pdes(adev);
+
+	/* allocate page table array */
+	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		return -ENOMEM;
+	}
+
+	r = amdgpu_bo_create(adev, pd_size, align, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     NULL, &vm->page_directory);
+	if (r)
+		return r;
+
+	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+	if (r) {
+		amdgpu_bo_unref(&vm->page_directory);
+		vm->page_directory = NULL;
+		return r;
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_vm_fini - tear down a vm instance
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	struct amdgpu_bo_va_mapping *mapping, *tmp;
+	int i;
+
+	if (!RB_EMPTY_ROOT(&vm->va)) {
+		dev_err(adev->dev, "still active bo inside vm\n");
+	}
+	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
+		list_del(&mapping->list);
+		interval_tree_remove(&mapping->it, &vm->va);
+		kfree(mapping);
+	}
+	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
+		list_del(&mapping->list);
+		kfree(mapping);
+	}
+
+	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
+		amdgpu_bo_unref(&vm->page_tables[i].bo);
+	kfree(vm->page_tables);
+
+	amdgpu_bo_unref(&vm->page_directory);
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+		amdgpu_fence_unref(&vm->ids[i].last_id_use);
+	}
+
+	mutex_destroy(&vm->mutex);
+}

+ 48 - 0
drivers/gpu/drm/amd/amdgpu/atom-bits.h

@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+    return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+    return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+    return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
+}
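+/*
+ * Illustrative example: the accessors assemble little-endian values, so
+ * for BIOS bytes { 0x78, 0x56, 0x34, 0x12 } at ptr, get_u16() returns
+ * 0x5678 and get_u32() returns 0x12345678.
+ */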
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
+
+#endif

+ 100 - 0
drivers/gpu/drm/amd/amdgpu/atom-names.h

@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
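
These name tables exist purely for tracing: with ATOM_DEBUG defined, the interpreter in atom.c (added further down) prints atom_op_names[] indexed by the opcode byte it just fetched, atom_table_names[] indexed by the command table it is about to call, and atom_io_names[] for the SET_*_PORT operations. A minimal sketch of the lookup pattern, assuming only the arrays above (the "UNKNOWN" fallback mirrors the bounds check the interpreter itself performs):

    static const char *atom_op_name(unsigned int op)
    {
    	/* the opcode byte doubles as the index into atom_op_names[] */
    	return (op < ATOM_OP_NAMES_CNT) ? atom_op_names[op] : "UNKNOWN";
    }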

+ 42 - 0
drivers/gpu/drm/amd/amdgpu/atom-types.h

@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
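
atom-types.h simply maps the USHORT/ULONG/UCHAR names used throughout the BIOS headers onto fixed-width kernel types and derives ATOM_BIG_ENDIAN from the build's __BIG_ENDIAN define. The interpreter never dereferences multi-byte BIOS fields directly; it reads them through the U8/U16/U32 and CU8/CU16/CU32 helpers from atom-bits.h (included by atom.c below), which fetch the little-endian image byte by byte so unaligned offsets are safe on any architecture. A rough userspace equivalent of those accessors, assuming a `bios` buffer holding the ROM image:

    #include <stdint.h>

    /* little-endian, unaligned-safe reads, analogous to atom-bits.h */
    static inline uint8_t bios_u8(const uint8_t *bios, int off)
    {
    	return bios[off];
    }

    static inline uint16_t bios_u16(const uint8_t *bios, int off)
    {
    	return bios[off] | ((uint16_t)bios[off + 1] << 8);
    }

    static inline uint32_t bios_u32(const uint8_t *bios, int off)
    {
    	return bios_u16(bios, off) | ((uint32_t)bios_u16(bios, off + 2) << 16);
    }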

+ 1408 - 0
drivers/gpu/drm/amd/amdgpu/atom.c

@@ -0,0 +1,1408 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+#include "amdgpu.h"
+
+#define ATOM_COND_ABOVE		0
+#define ATOM_COND_ABOVEOREQUAL	1
+#define ATOM_COND_ALWAYS	2
+#define ATOM_COND_BELOW		3
+#define ATOM_COND_BELOWOREQUAL	4
+#define ATOM_COND_EQUAL		5
+#define ATOM_COND_NOTEQUAL	6
+
+#define ATOM_PORT_ATI	0
+#define ATOM_PORT_PCI	1
+#define ATOM_PORT_SYSIO	2
+
+#define ATOM_UNIT_MICROSEC	0
+#define ATOM_UNIT_MILLISEC	1
+
+#define PLL_INDEX	2
+#define PLL_DATA	3
+
+typedef struct {
+	struct atom_context *ctx;
+	uint32_t *ps, *ws;
+	int ps_shift;
+	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
+} atom_exec_context;
+
+int amdgpu_atom_debug = 0;
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+
+static uint32_t atom_arg_mask[8] =
+    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
+0xFF000000 };
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
+
+static int atom_dst_to_src[8][4] = {
+	/* translate destination alignment field to the source alignment encoding */
+	{0, 0, 0, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+	while (n--)
+		printk("   ");
+}
+
+#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
+#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
+#else
+#define DEBUG(...) do { } while (0)
+#define SDEBUG(...) do { } while (0)
+#endif
+
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+				 uint32_t index, uint32_t data)
+{
+	uint32_t temp = 0xCDCDCDCD;
+
+	while (1)
+		switch (CU8(base)) {
+		case ATOM_IIO_NOP:
+			base++;
+			break;
+		case ATOM_IIO_READ:
+			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			base += 3;
+			break;
+		case ATOM_IIO_WRITE:
+			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+			base += 3;
+			break;
+		case ATOM_IIO_CLEAR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+			base += 3;
+			break;
+		case ATOM_IIO_SET:
+			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
+			base += 3;
+			break;
+		case ATOM_IIO_MOVE_INDEX:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+			temp |= ((index >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_DATA:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+			temp |= ((data >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_ATTR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+			temp |= ((ctx->io_attr >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_END:
+			return temp;
+		default:
+			printk(KERN_INFO "Unknown IIO opcode.\n");
+			return 0;
+		}
+}
+
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+				 int *ptr, uint32_t *saved, int print)
+{
+	uint32_t idx, val = 0xCDCDCDCD, align, arg;
+	struct atom_context *gctx = ctx->ctx;
+	arg = attr & 7;
+	align = (attr >> 3) & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print)
+			DEBUG("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			val = gctx->card->reg_read(gctx->card, idx);
+			break;
+		case ATOM_IO_PCI:
+			printk(KERN_INFO
+			       "PCI registers are not implemented.\n");
+			return 0;
+		case ATOM_IO_SYSIO:
+			printk(KERN_INFO
+			       "SYSIO registers are not implemented.\n");
+			return 0;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				printk(KERN_INFO "Bad IO mode.\n");
+				return 0;
+			}
+			if (!gctx->iio[gctx->io_mode & 0x7F]) {
+				printk(KERN_INFO
+				       "Undefined indirect IO read method %d.\n",
+				       gctx->io_mode & 0x7F);
+				return 0;
+			}
+			val =
+			    atom_iio_execute(gctx,
+					     gctx->iio[gctx->io_mode & 0x7F],
+					     idx, 0);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		/* get_unaligned_le32 avoids unaligned accesses from atombios
+		 * tables, noticed on a DEC Alpha. */
+		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+		if (print)
+			DEBUG("PS[0x%02X,0x%04X]", idx, val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			val = gctx->divmul[0];
+			break;
+		case ATOM_WS_REMAINDER:
+			val = gctx->divmul[1];
+			break;
+		case ATOM_WS_DATAPTR:
+			val = gctx->data_block;
+			break;
+		case ATOM_WS_SHIFT:
+			val = gctx->shift;
+			break;
+		case ATOM_WS_OR_MASK:
+			val = 1 << gctx->shift;
+			break;
+		case ATOM_WS_AND_MASK:
+			val = ~(1 << gctx->shift);
+			break;
+		case ATOM_WS_FB_WINDOW:
+			val = gctx->fb_base;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			val = gctx->io_attr;
+			break;
+		case ATOM_WS_REGPTR:
+			val = gctx->reg_block;
+			break;
+		default:
+			val = ctx->ws[idx];
+		}
+		break;
+	case ATOM_ARG_ID:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print) {
+			if (gctx->data_block)
+				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
+			else
+				DEBUG("ID[0x%04X]", idx);
+		}
+		val = U32(idx + gctx->data_block);
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+			val = 0;
+		} else
+			val = gctx->scratch[(gctx->fb_base / 4) + idx];
+		if (print)
+			DEBUG("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			val = U32(*ptr);
+			(*ptr) += 4;
+			if (print)
+				DEBUG("IMM 0x%08X\n", val);
+			return val;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			val = U16(*ptr);
+			(*ptr) += 2;
+			if (print)
+				DEBUG("IMM 0x%04X\n", val);
+			return val;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			val = U8(*ptr);
+			(*ptr)++;
+			if (print)
+				DEBUG("IMM 0x%02X\n", val);
+			return val;
+		}
+		return 0;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("PLL[0x%02X]", idx);
+		val = gctx->card->pll_read(gctx->card, idx);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("MC[0x%02X]", idx);
+		val = gctx->card->mc_read(gctx->card, idx);
+		break;
+	}
+	if (saved)
+		*saved = val;
+	val &= atom_arg_mask[align];
+	val >>= atom_arg_shift[align];
+	if (print)
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			DEBUG(".[31:0] -> 0x%08X\n", val);
+			break;
+		case ATOM_SRC_WORD0:
+			DEBUG(".[15:0] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD8:
+			DEBUG(".[23:8] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD16:
+			DEBUG(".[31:16] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_BYTE0:
+			DEBUG(".[7:0] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE8:
+			DEBUG(".[15:8] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE16:
+			DEBUG(".[23:16] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE24:
+			DEBUG(".[31:24] -> 0x%02X\n", val);
+			break;
+		}
+	return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+	case ATOM_ARG_ID:
+		(*ptr) += 2;
+		break;
+	case ATOM_ARG_PLL:
+	case ATOM_ARG_MC:
+	case ATOM_ARG_PS:
+	case ATOM_ARG_WS:
+	case ATOM_ARG_FB:
+		(*ptr)++;
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			(*ptr) += 4;
+			return;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			(*ptr) += 2;
+			return;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			(*ptr)++;
+			return;
+		}
+		return;
+	}
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+	uint32_t val = 0xCDCDCDCD;
+
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		val = U32(*ptr);
+		(*ptr) += 4;
+		break;
+	case ATOM_SRC_WORD0:
+	case ATOM_SRC_WORD8:
+	case ATOM_SRC_WORD16:
+		val = U16(*ptr);
+		(*ptr) += 2;
+		break;
+	case ATOM_SRC_BYTE0:
+	case ATOM_SRC_BYTE8:
+	case ATOM_SRC_BYTE16:
+	case ATOM_SRC_BYTE24:
+		val = U8(*ptr);
+		(*ptr)++;
+		break;
+	}
+	return val;
+}
+
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			     int *ptr, uint32_t *saved, int print)
+{
+	return atom_get_src_int(ctx,
+				arg | atom_dst_to_src[(attr >> 3) &
+						      7][(attr >> 6) & 3] << 3,
+				ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+	atom_skip_src_int(ctx,
+			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
+								 3] << 3, ptr);
+}
+
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			 int *ptr, uint32_t val, uint32_t saved)
+{
+	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	uint32_t old_val = val, idx;
+	struct atom_context *gctx = ctx->ctx;
+	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+	val <<= atom_arg_shift[align];
+	val &= atom_arg_mask[align];
+	saved &= ~atom_arg_mask[align];
+	val |= saved;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		DEBUG("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			if (idx == 0)
+				gctx->card->reg_write(gctx->card, idx,
+						      val << 2);
+			else
+				gctx->card->reg_write(gctx->card, idx, val);
+			break;
+		case ATOM_IO_PCI:
+			printk(KERN_INFO
+			       "PCI registers are not implemented.\n");
+			return;
+		case ATOM_IO_SYSIO:
+			printk(KERN_INFO
+			       "SYSIO registers are not implemented.\n");
+			return;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				printk(KERN_INFO "Bad IO mode.\n");
+				return;
+			}
+			if (!gctx->iio[gctx->io_mode & 0xFF]) {
+				printk(KERN_INFO
+				       "Undefined indirect IO write method %d.\n",
+				       gctx->io_mode & 0x7F);
+				return;
+			}
+			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
+					 idx, val);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("PS[0x%02X]", idx);
+		ctx->ps[idx] = cpu_to_le32(val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			gctx->divmul[0] = val;
+			break;
+		case ATOM_WS_REMAINDER:
+			gctx->divmul[1] = val;
+			break;
+		case ATOM_WS_DATAPTR:
+			gctx->data_block = val;
+			break;
+		case ATOM_WS_SHIFT:
+			gctx->shift = val;
+			break;
+		case ATOM_WS_OR_MASK:
+		case ATOM_WS_AND_MASK:
+			break;
+		case ATOM_WS_FB_WINDOW:
+			gctx->fb_base = val;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			gctx->io_attr = val;
+			break;
+		case ATOM_WS_REGPTR:
+			gctx->reg_block = val;
+			break;
+		default:
+			ctx->ws[idx] = val;
+		}
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+		} else
+			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
+		DEBUG("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("PLL[0x%02X]", idx);
+		gctx->card->pll_write(gctx->card, idx, val);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("MC[0x%02X]", idx);
+		gctx->card->mc_write(gctx->card, idx, val);
+		return;
+	}
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		DEBUG(".[31:0] <- 0x%08X\n", old_val);
+		break;
+	case ATOM_SRC_WORD0:
+		DEBUG(".[15:0] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD8:
+		DEBUG(".[23:8] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD16:
+		DEBUG(".[31:16] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE0:
+		DEBUG(".[7:0] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE8:
+		DEBUG(".[15:8] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE16:
+		DEBUG(".[23:16] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE24:
+		DEBUG(".[31:24] <- 0x%02X\n", old_val);
+		break;
+	}
+}
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst += src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk("ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8((*ptr)++);
+	int r = 0;
+
+	if (idx < ATOM_TABLE_NAMES_CNT)
+		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
+	else
+		SDEBUG("   table: %d\n", idx);
+	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t saved;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = (dst == src);
+	ctx->ctx->cs_above = (dst > src);
+	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+	       ctx->ctx->cs_above ? "GT" : "LE");
+}
+
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+	unsigned count = U8((*ptr)++);
+	SDEBUG("   count: %d\n", count);
+	if (arg == ATOM_UNIT_MICROSEC)
+		udelay(count);
+	else if (!drm_can_sleep())
+		mdelay(count);
+	else
+		msleep(count);
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	if (src != 0) {
+		ctx->ctx->divmul[0] = dst / src;
+		ctx->ctx->divmul[1] = dst % src;
+	} else {
+		ctx->ctx->divmul[0] = 0;
+		ctx->ctx->divmul[1] = 0;
+	}
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* functionally, a nop */
+}
+
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
+	(*ptr) += 2;
+	switch (arg) {
+	case ATOM_COND_ABOVE:
+		execute = ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_ABOVEOREQUAL:
+		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_ALWAYS:
+		execute = 1;
+		break;
+	case ATOM_COND_BELOW:
+		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+		break;
+	case ATOM_COND_BELOWOREQUAL:
+		execute = !ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_EQUAL:
+		execute = ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_NOTEQUAL:
+		execute = !ctx->ctx->cs_equal;
+		break;
+	}
+	if (arg != ATOM_COND_ALWAYS)
+		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
+	SDEBUG("   target: 0x%04X\n", target);
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if ((jiffies_to_msecs(cjiffies) > 5000)) {
+					DRM_ERROR("atombios stuck in loop for more than 5 seconds, aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrapped around, just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
+		*ptr = ctx->start + target;
+	}
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, mask, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+	SDEBUG("   mask: 0x%08x", mask);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= mask;
+	dst |= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, saved;
+	int dptr = *ptr;
+	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	else {
+		atom_skip_dst(ctx, arg, attr, ptr);
+		saved = 0xCDCDCDCD;
+	}
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst |= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t val = U8((*ptr)++);
+	SDEBUG("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8(*ptr);
+	(*ptr)++;
+	SDEBUG("   block: %d\n", idx);
+	if (!idx)
+		ctx->ctx->data_block = 0;
+	else if (idx == 255)
+		ctx->ctx->data_block = ctx->start;
+	else
+		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	SDEBUG("   fb_base: ");
+	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int port;
+	switch (arg) {
+	case ATOM_PORT_ATI:
+		port = U16(*ptr);
+		if (port < ATOM_IO_NAMES_CNT)
+			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
+		else
+			SDEBUG("   port: %d\n", port);
+		if (!port)
+			ctx->ctx->io_mode = ATOM_IO_MM;
+		else
+			ctx->ctx->io_mode = ATOM_IO_IIO | port;
+		(*ptr) += 2;
+		break;
+	case ATOM_PORT_PCI:
+		ctx->ctx->io_mode = ATOM_IO_PCI;
+		(*ptr)++;
+		break;
+	case ATOM_PORT_SYSIO:
+		ctx->ctx->io_mode = ATOM_IO_SYSIO;
+		(*ptr)++;
+		break;
+	}
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	ctx->ctx->reg_block = U16(*ptr);
+	(*ptr) += 2;
+	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst <<= shift;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst >>= shift;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* op needs the full dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst <<= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* op needs the full dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst >>= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst -= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, val, target;
+	SDEBUG("   switch: ");
+	src = atom_get_src(ctx, attr, ptr);
+	while (U16(*ptr) != ATOM_CASE_END)
+		if (U8(*ptr) == ATOM_CASE_MAGIC) {
+			(*ptr)++;
+			SDEBUG("   case: ");
+			val =
+			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
+					 ptr);
+			target = U16(*ptr);
+			if (val == src) {
+				SDEBUG("   target: %04X\n", target);
+				*ptr = ctx->start + target;
+				return;
+			}
+			(*ptr) += 2;
+		} else {
+			printk(KERN_INFO "Bad case.\n");
+			return;
+		}
+	(*ptr) += 2;
+}
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = ((dst & src) == 0);
+	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst ^= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static struct {
+	void (*func) (atom_exec_context *, int *, int);
+	int arg;
+} opcode_table[ATOM_OP_CNT] = {
+	{
+	NULL, 0}, {
+	atom_op_move, ATOM_ARG_REG}, {
+	atom_op_move, ATOM_ARG_PS}, {
+	atom_op_move, ATOM_ARG_WS}, {
+	atom_op_move, ATOM_ARG_FB}, {
+	atom_op_move, ATOM_ARG_PLL}, {
+	atom_op_move, ATOM_ARG_MC}, {
+	atom_op_and, ATOM_ARG_REG}, {
+	atom_op_and, ATOM_ARG_PS}, {
+	atom_op_and, ATOM_ARG_WS}, {
+	atom_op_and, ATOM_ARG_FB}, {
+	atom_op_and, ATOM_ARG_PLL}, {
+	atom_op_and, ATOM_ARG_MC}, {
+	atom_op_or, ATOM_ARG_REG}, {
+	atom_op_or, ATOM_ARG_PS}, {
+	atom_op_or, ATOM_ARG_WS}, {
+	atom_op_or, ATOM_ARG_FB}, {
+	atom_op_or, ATOM_ARG_PLL}, {
+	atom_op_or, ATOM_ARG_MC}, {
+	atom_op_shift_left, ATOM_ARG_REG}, {
+	atom_op_shift_left, ATOM_ARG_PS}, {
+	atom_op_shift_left, ATOM_ARG_WS}, {
+	atom_op_shift_left, ATOM_ARG_FB}, {
+	atom_op_shift_left, ATOM_ARG_PLL}, {
+	atom_op_shift_left, ATOM_ARG_MC}, {
+	atom_op_shift_right, ATOM_ARG_REG}, {
+	atom_op_shift_right, ATOM_ARG_PS}, {
+	atom_op_shift_right, ATOM_ARG_WS}, {
+	atom_op_shift_right, ATOM_ARG_FB}, {
+	atom_op_shift_right, ATOM_ARG_PLL}, {
+	atom_op_shift_right, ATOM_ARG_MC}, {
+	atom_op_mul, ATOM_ARG_REG}, {
+	atom_op_mul, ATOM_ARG_PS}, {
+	atom_op_mul, ATOM_ARG_WS}, {
+	atom_op_mul, ATOM_ARG_FB}, {
+	atom_op_mul, ATOM_ARG_PLL}, {
+	atom_op_mul, ATOM_ARG_MC}, {
+	atom_op_div, ATOM_ARG_REG}, {
+	atom_op_div, ATOM_ARG_PS}, {
+	atom_op_div, ATOM_ARG_WS}, {
+	atom_op_div, ATOM_ARG_FB}, {
+	atom_op_div, ATOM_ARG_PLL}, {
+	atom_op_div, ATOM_ARG_MC}, {
+	atom_op_add, ATOM_ARG_REG}, {
+	atom_op_add, ATOM_ARG_PS}, {
+	atom_op_add, ATOM_ARG_WS}, {
+	atom_op_add, ATOM_ARG_FB}, {
+	atom_op_add, ATOM_ARG_PLL}, {
+	atom_op_add, ATOM_ARG_MC}, {
+	atom_op_sub, ATOM_ARG_REG}, {
+	atom_op_sub, ATOM_ARG_PS}, {
+	atom_op_sub, ATOM_ARG_WS}, {
+	atom_op_sub, ATOM_ARG_FB}, {
+	atom_op_sub, ATOM_ARG_PLL}, {
+	atom_op_sub, ATOM_ARG_MC}, {
+	atom_op_setport, ATOM_PORT_ATI}, {
+	atom_op_setport, ATOM_PORT_PCI}, {
+	atom_op_setport, ATOM_PORT_SYSIO}, {
+	atom_op_setregblock, 0}, {
+	atom_op_setfbbase, 0}, {
+	atom_op_compare, ATOM_ARG_REG}, {
+	atom_op_compare, ATOM_ARG_PS}, {
+	atom_op_compare, ATOM_ARG_WS}, {
+	atom_op_compare, ATOM_ARG_FB}, {
+	atom_op_compare, ATOM_ARG_PLL}, {
+	atom_op_compare, ATOM_ARG_MC}, {
+	atom_op_switch, 0}, {
+	atom_op_jump, ATOM_COND_ALWAYS}, {
+	atom_op_jump, ATOM_COND_EQUAL}, {
+	atom_op_jump, ATOM_COND_BELOW}, {
+	atom_op_jump, ATOM_COND_ABOVE}, {
+	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
+	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
+	atom_op_jump, ATOM_COND_NOTEQUAL}, {
+	atom_op_test, ATOM_ARG_REG}, {
+	atom_op_test, ATOM_ARG_PS}, {
+	atom_op_test, ATOM_ARG_WS}, {
+	atom_op_test, ATOM_ARG_FB}, {
+	atom_op_test, ATOM_ARG_PLL}, {
+	atom_op_test, ATOM_ARG_MC}, {
+	atom_op_delay, ATOM_UNIT_MILLISEC}, {
+	atom_op_delay, ATOM_UNIT_MICROSEC}, {
+	atom_op_calltable, 0}, {
+	atom_op_repeat, 0}, {
+	atom_op_clear, ATOM_ARG_REG}, {
+	atom_op_clear, ATOM_ARG_PS}, {
+	atom_op_clear, ATOM_ARG_WS}, {
+	atom_op_clear, ATOM_ARG_FB}, {
+	atom_op_clear, ATOM_ARG_PLL}, {
+	atom_op_clear, ATOM_ARG_MC}, {
+	atom_op_nop, 0}, {
+	atom_op_eot, 0}, {
+	atom_op_mask, ATOM_ARG_REG}, {
+	atom_op_mask, ATOM_ARG_PS}, {
+	atom_op_mask, ATOM_ARG_WS}, {
+	atom_op_mask, ATOM_ARG_FB}, {
+	atom_op_mask, ATOM_ARG_PLL}, {
+	atom_op_mask, ATOM_ARG_MC}, {
+	atom_op_postcard, 0}, {
+	atom_op_beep, 0}, {
+	atom_op_savereg, 0}, {
+	atom_op_restorereg, 0}, {
+	atom_op_setdatablock, 0}, {
+	atom_op_xor, ATOM_ARG_REG}, {
+	atom_op_xor, ATOM_ARG_PS}, {
+	atom_op_xor, ATOM_ARG_WS}, {
+	atom_op_xor, ATOM_ARG_FB}, {
+	atom_op_xor, ATOM_ARG_PLL}, {
+	atom_op_xor, ATOM_ARG_MC}, {
+	atom_op_shl, ATOM_ARG_REG}, {
+	atom_op_shl, ATOM_ARG_PS}, {
+	atom_op_shl, ATOM_ARG_WS}, {
+	atom_op_shl, ATOM_ARG_FB}, {
+	atom_op_shl, ATOM_ARG_PLL}, {
+	atom_op_shl, ATOM_ARG_MC}, {
+	atom_op_shr, ATOM_ARG_REG}, {
+	atom_op_shr, ATOM_ARG_PS}, {
+	atom_op_shr, ATOM_ARG_WS}, {
+	atom_op_shr, ATOM_ARG_FB}, {
+	atom_op_shr, ATOM_ARG_PLL}, {
+	atom_op_shr, ATOM_ARG_MC}, {
+atom_op_debug, 0},};
+
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+{
+	int base = CU16(ctx->cmd_table + 4 + 2 * index);
+	int len, ws, ps, ptr;
+	unsigned char op;
+	atom_exec_context ectx;
+	int ret = 0;
+
+	if (!base)
+		return -EINVAL;
+
+	len = CU16(base + ATOM_CT_SIZE_PTR);
+	ws = CU8(base + ATOM_CT_WS_PTR);
+	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+	ptr = base + ATOM_CT_CODE_PTR;
+
+	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+	ectx.ctx = ctx;
+	ectx.ps_shift = ps / 4;
+	ectx.start = base;
+	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
+	if (ws)
+		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+	else
+		ectx.ws = NULL;
+
+	debug_depth++;
+	while (1) {
+		op = CU8(ptr++);
+		if (op < ATOM_OP_NAMES_CNT)
+			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+		else
+			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				base, len, ws, ps, ptr - 1);
+			ret = -EINVAL;
+			goto free;
+		}
+
+		if (op < ATOM_OP_CNT && op > 0)
+			opcode_table[op].func(&ectx, &ptr,
+					      opcode_table[op].arg);
+		else
+			break;
+
+		if (op == ATOM_OP_EOT)
+			break;
+	}
+	debug_depth--;
+	SDEBUG("<<\n");
+
+free:
+	if (ws)
+		kfree(ectx.ws);
+	return ret;
+}
+
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+	int r;
+
+	mutex_lock(&ctx->mutex);
+	/* reset data block */
+	ctx->data_block = 0;
+	/* reset reg block */
+	ctx->reg_block = 0;
+	/* reset fb window */
+	ctx->fb_base = 0;
+	/* reset io mode */
+	ctx->io_mode = ATOM_IO_MM;
+	/* reset divmul */
+	ctx->divmul[0] = 0;
+	ctx->divmul[1] = 0;
+	r = amdgpu_atom_execute_table_locked(ctx, index, params);
+	mutex_unlock(&ctx->mutex);
+	return r;
+}
+
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+	if (!ctx->iio)
+		return;
+	while (CU8(base) == ATOM_IIO_START) {
+		ctx->iio[CU8(base + 1)] = base + 2;
+		base += 2;
+		while (CU8(base) != ATOM_IIO_END)
+			base += atom_iio_len[CU8(base)];
+		base += 3;
+	}
+}
+
+struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
+{
+	int base;
+	struct atom_context *ctx =
+	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+	char *str;
+	char name[512];
+	int i;
+
+	if (!ctx)
+		return NULL;
+
+	ctx->card = card;
+	ctx->bios = bios;
+
+	if (CU16(0) != ATOM_BIOS_MAGIC) {
+		printk(KERN_INFO "Invalid BIOS magic.\n");
+		kfree(ctx);
+		return NULL;
+	}
+	if (strncmp
+	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+	     strlen(ATOM_ATI_MAGIC))) {
+		printk(KERN_INFO "Invalid ATI magic.\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	base = CU16(ATOM_ROM_TABLE_PTR);
+	if (strncmp
+	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+	     strlen(ATOM_ROM_MAGIC))) {
+		printk(KERN_INFO "Invalid ATOM magic.\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+	if (!ctx->iio) {
+		amdgpu_atom_destroy(ctx);
+		return NULL;
+	}
+
+	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+	while (*str && ((*str == '\n') || (*str == '\r')))
+		str++;
+	/* name string isn't always 0 terminated */
+	for (i = 0; i < 511; i++) {
+		name[i] = str[i];
+		if (name[i] < '.' || name[i] > 'z') {
+			name[i] = 0;
+			break;
+		}
+	}
+	printk(KERN_INFO "ATOM BIOS: %s\n", name);
+
+	return ctx;
+}
+
+int amdgpu_atom_asic_init(struct atom_context *ctx)
+{
+	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+	uint32_t ps[16];
+	int ret;
+
+	memset(ps, 0, 64);
+
+	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+	if (!ps[0] || !ps[1])
+		return 1;
+
+	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+		return 1;
+	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	if (ret)
+		return ret;
+
+	memset(ps, 0, 64);
+
+	return ret;
+}
+
+void amdgpu_atom_destroy(struct atom_context *ctx)
+{
+	kfree(ctx->iio);
+	kfree(ctx);
+}
+
+bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
+			    uint16_t * size, uint8_t * frev, uint8_t * crev,
+			    uint16_t * data_start)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
+
+	if (size)
+		*size = CU16(idx);
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	*data_start = idx;
+	return true;
+}
+
+bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+			   uint8_t * crev)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
+
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	return true;
+}
+
+int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+	uint16_t data_offset;
+	int usage_bytes = 0;
+	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+	}
+	ctx->scratch_size_bytes = 0;
+	if (usage_bytes == 0)
+		usage_bytes = 20 * 1024;
+	/* allocate some scratch memory */
+	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+	if (!ctx->scratch)
+		return -ENOMEM;
+	ctx->scratch_size_bytes = usage_bytes;
+	return 0;
+}
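
Every command table is driven through amdgpu_atom_execute_table(): the caller supplies the little-endian parameter space ("PS"), while the interpreter allocates the per-call workspace ("WS") itself from the size recorded in the table header. amdgpu_atom_asic_init() above is the in-tree example; a hedged sketch of the same calling pattern (the sclk_10khz/mclk_10khz inputs are placeholders, not part of this patch) might look like:

    /* sketch only -- mirrors the ASIC_Init call in amdgpu_atom_asic_init() above */
    static int example_run_asic_init(struct atom_context *ctx,
    				 uint32_t sclk_10khz, uint32_t mclk_10khz)
    {
    	uint32_t ps[16];

    	memset(ps, 0, sizeof(ps));
    	ps[0] = cpu_to_le32(sclk_10khz);	/* default engine clock */
    	ps[1] = cpu_to_le32(mclk_10khz);	/* default memory clock */

    	/* skip quietly if this BIOS does not expose the table */
    	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
    		return 0;

    	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
    }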

+ 159 - 0
drivers/gpu/drm/amd/amdgpu/atom.h

@@ -0,0 +1,159 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#define ATOM_BIOS_MAGIC		0xAA55
+#define ATOM_ATI_MAGIC_PTR	0x30
+#define ATOM_ATI_MAGIC		" 761295520"
+#define ATOM_ROM_TABLE_PTR	0x48
+
+#define ATOM_ROM_MAGIC		"ATOM"
+#define ATOM_ROM_MAGIC_PTR	4
+
+#define ATOM_ROM_MSG_PTR	0x10
+#define ATOM_ROM_CMD_PTR	0x1E
+#define ATOM_ROM_DATA_PTR	0x20
+
+#define ATOM_CMD_INIT		0
+#define ATOM_CMD_SETSCLK	0x0A
+#define ATOM_CMD_SETMCLK	0x0B
+#define ATOM_CMD_SETPCLK	0x0C
+#define ATOM_CMD_SPDFANCNTL	0x39
+
+#define ATOM_DATA_FWI_PTR	0xC
+#define ATOM_DATA_IIO_PTR	0x32
+
+#define ATOM_FWI_DEFSCLK_PTR	8
+#define ATOM_FWI_DEFMCLK_PTR	0xC
+#define ATOM_FWI_MAXSCLK_PTR	0x24
+#define ATOM_FWI_MAXMCLK_PTR	0x28
+
+#define ATOM_CT_SIZE_PTR	0
+#define ATOM_CT_WS_PTR		4
+#define ATOM_CT_PS_PTR		5
+#define ATOM_CT_PS_MASK		0x7F
+#define ATOM_CT_CODE_PTR	6
+
+#define ATOM_OP_CNT		123
+#define ATOM_OP_EOT		91
+
+#define ATOM_CASE_MAGIC		0x63
+#define ATOM_CASE_END		0x5A5A
+
+#define ATOM_ARG_REG		0
+#define ATOM_ARG_PS		1
+#define ATOM_ARG_WS		2
+#define ATOM_ARG_FB		3
+#define ATOM_ARG_ID		4
+#define ATOM_ARG_IMM		5
+#define ATOM_ARG_PLL		6
+#define ATOM_ARG_MC		7
+
+#define ATOM_SRC_DWORD		0
+#define ATOM_SRC_WORD0		1
+#define ATOM_SRC_WORD8		2
+#define ATOM_SRC_WORD16		3
+#define ATOM_SRC_BYTE0		4
+#define ATOM_SRC_BYTE8		5
+#define ATOM_SRC_BYTE16		6
+#define ATOM_SRC_BYTE24		7
+
+#define ATOM_WS_QUOTIENT	0x40
+#define ATOM_WS_REMAINDER	0x41
+#define ATOM_WS_DATAPTR		0x42
+#define ATOM_WS_SHIFT		0x43
+#define ATOM_WS_OR_MASK		0x44
+#define ATOM_WS_AND_MASK	0x45
+#define ATOM_WS_FB_WINDOW	0x46
+#define ATOM_WS_ATTRIBUTES	0x47
+#define ATOM_WS_REGPTR  	0x48
+
+#define ATOM_IIO_NOP		0
+#define ATOM_IIO_START		1
+#define ATOM_IIO_READ		2
+#define ATOM_IIO_WRITE		3
+#define ATOM_IIO_CLEAR		4
+#define ATOM_IIO_SET		5
+#define ATOM_IIO_MOVE_INDEX	6
+#define ATOM_IIO_MOVE_ATTR	7
+#define ATOM_IIO_MOVE_DATA	8
+#define ATOM_IIO_END		9
+
+#define ATOM_IO_MM		0
+#define ATOM_IO_PCI		1
+#define ATOM_IO_SYSIO		2
+#define ATOM_IO_IIO		0x80
+
+struct card_info {
+	struct drm_device *dev;
+	void (* reg_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+	uint32_t (* reg_read)(struct card_info *, uint32_t);          /*  filled by driver */
+	void (* ioreg_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+	uint32_t (* ioreg_read)(struct card_info *, uint32_t);          /*  filled by driver */
+	void (* mc_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+	uint32_t (* mc_read)(struct card_info *, uint32_t);          /*  filled by driver */
+	void (* pll_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
+	uint32_t (* pll_read)(struct card_info *, uint32_t);          /*  filled by driver */
+};
+
+struct atom_context {
+	struct card_info *card;
+	struct mutex mutex;
+	void *bios;
+	uint32_t cmd_table, data_table;
+	uint16_t *iio;
+
+	uint16_t data_block;
+	uint32_t fb_base;
+	uint32_t divmul[2];
+	uint16_t io_attr;
+	uint16_t reg_block;
+	uint8_t shift;
+	int cs_equal, cs_above;
+	int io_mode;
+	uint32_t *scratch;
+	int scratch_size_bytes;
+};
+
+extern int amdgpu_atom_debug;
+
+struct atom_context *amdgpu_atom_parse(struct card_info *, void *);
+int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *);
+int amdgpu_atom_asic_init(struct atom_context *);
+void amdgpu_atom_destroy(struct atom_context *);
+bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+			    uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index,
+			   uint8_t *frev, uint8_t *crev);
+int amdgpu_atom_allocate_fb_scratch(struct atom_context *ctx);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif
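
struct card_info is the interpreter's only route to the hardware: the driver fills in the eight register/IO-register/MC/PLL accessor callbacks, passes the structure together with the mapped VBIOS image to amdgpu_atom_parse(), and then initializes the context mutex and scratch area before running any tables. A hedged sketch of that wiring (the callback names and amdgpu_device access are illustrative only; the real hooks live in driver code added elsewhere in this merge and are not shown here):

    static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
    {
    	struct amdgpu_device *adev = info->dev->dev_private;

    	return RREG32(reg);
    }

    static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
    {
    	struct amdgpu_device *adev = info->dev->dev_private;

    	WREG32(reg, val);
    }

    /* ioreg_*, mc_* and pll_* hooks follow the same shape */

    static int example_atom_setup(struct card_info *info, void *vbios_image)
    {
    	struct atom_context *ctx = amdgpu_atom_parse(info, vbios_image);

    	if (!ctx)
    		return -EINVAL;
    	mutex_init(&ctx->mutex);
    	return amdgpu_atom_allocate_fb_scratch(ctx);
    }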

+ 8555 - 0
drivers/gpu/drm/amd/amdgpu/atombios.h

@@ -0,0 +1,8555 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/****************************************************************************/
+/*Portion I: Definitions  shared between VBIOS and Driver                   */
+/****************************************************************************/
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR                   0x00020000
+#define ATOM_VERSION_MINOR                   0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness should be specified before inclusion,
+ * default to little endian
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+  #ifndef ULONG
+    typedef unsigned long ULONG;
+  #endif
+
+  #ifndef UCHAR
+    typedef unsigned char UCHAR;
+  #endif
+
+  #ifndef USHORT
+    typedef unsigned short USHORT;
+  #endif
+#endif
+
+#define ATOM_DAC_A            0
+#define ATOM_DAC_B            1
+#define ATOM_EXT_DAC          2
+
+#define ATOM_CRTC1            0
+#define ATOM_CRTC2            1
+#define ATOM_CRTC3            2
+#define ATOM_CRTC4            3
+#define ATOM_CRTC5            4
+#define ATOM_CRTC6            5
+
+#define ATOM_UNDERLAY_PIPE0   16
+#define ATOM_UNDERLAY_PIPE1   17
+
+#define ATOM_CRTC_INVALID     0xFF
+
+#define ATOM_DIGA             0
+#define ATOM_DIGB             1
+
+#define ATOM_PPLL1            0
+#define ATOM_PPLL2            1
+#define ATOM_DCPLL            2
+#define ATOM_PPLL0            2
+#define ATOM_PPLL3            3
+
+#define ATOM_EXT_PLL1         8
+#define ATOM_EXT_PLL2         9
+#define ATOM_EXT_CLOCK        10
+#define ATOM_PPLL_INVALID     0xFF
+
+#define ENCODER_REFCLK_SRC_P1PLL       0
+#define ENCODER_REFCLK_SRC_P2PLL       1
+#define ENCODER_REFCLK_SRC_DCPLL       2
+#define ENCODER_REFCLK_SRC_EXTCLK      3
+#define ENCODER_REFCLK_SRC_INVALID     0xFF
+
+#define ATOM_SCALER_DISABLE   0   //For Fudo, it's bypass and auto-center & no replication
+#define ATOM_SCALER_CENTER    1   //For Fudo, it's bypass and auto-center & auto replication
+#define ATOM_SCALER_EXPANSION 2   //For Fudo, it's 2 Tap alpha blending mode
+#define ATOM_SCALER_MULTI_EX  3   //For Fudo only, it's multi-tap mode only used to drive TV or CV, only used by Bios
+
+#define ATOM_DISABLE          0
+#define ATOM_ENABLE           1
+#define ATOM_LCD_BLOFF                          (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON                           (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL          (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START                 (ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP                  (ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT                       (ATOM_DISABLE+7)
+#define ATOM_INIT                               (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS                         (ATOM_DISABLE+8)
+
+#define ATOM_BLANKING         1
+#define ATOM_BLANKING_OFF     0
+
+
+#define ATOM_CRT1             0
+#define ATOM_CRT2             1
+
+#define ATOM_TV_NTSC          1
+#define ATOM_TV_NTSCJ         2
+#define ATOM_TV_PAL           3
+#define ATOM_TV_PALM          4
+#define ATOM_TV_PALCN         5
+#define ATOM_TV_PALN          6
+#define ATOM_TV_PAL60         7
+#define ATOM_TV_SECAM         8
+#define ATOM_TV_CV            16
+
+#define ATOM_DAC1_PS2         1
+#define ATOM_DAC1_CV          2
+#define ATOM_DAC1_NTSC        3
+#define ATOM_DAC1_PAL         4
+
+#define ATOM_DAC2_PS2         ATOM_DAC1_PS2
+#define ATOM_DAC2_CV          ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC        ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL         ATOM_DAC1_PAL
+
+#define ATOM_PM_ON            0
+#define ATOM_PM_STANDBY       1
+#define ATOM_PM_SUSPEND       2
+#define ATOM_PM_OFF           3
+
+// For ATOM_LVDS_INFO_V12
+// Bit0:{=0:single, =1:dual},
+// Bit1 {=0:666RGB, =1:888RGB},
+// Bit2:3:{Grey level}
+// Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+#define ATOM_PANEL_MISC_DUAL               0x00000001
+#define ATOM_PANEL_MISC_888RGB             0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL         0x0000000C
+#define ATOM_PANEL_MISC_FPDI               0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT   2
+#define ATOM_PANEL_MISC_SPATIAL            0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL           0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED        0x00000080
+
+#define MEMTYPE_DDR1                       "DDR1"
+#define MEMTYPE_DDR2                       "DDR2"
+#define MEMTYPE_DDR3                       "DDR3"
+#define MEMTYPE_DDR4                       "DDR4"
+
+#define ASIC_BUS_TYPE_PCI                  "PCI"
+#define ASIC_BUS_TYPE_AGP                  "AGP"
+#define ASIC_BUS_TYPE_PCIE                 "PCI_EXPRESS"
+
+//Maximum size of that FireGL flag string
+#define ATOM_FIREGL_FLAG_STRING            "FGL"      //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING     3     //sizeof( ATOM_FIREGL_FLAG_STRING )
+
+#define ATOM_FAKE_DESKTOP_STRING           "DSK"      //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING    ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+
+#define ATOM_M54T_FLAG_STRING              "M54T"     //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING  4          //sizeof( ATOM_M54T_FLAG_STRING )
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE     2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS     1
+
+#pragma pack(1)                                       // BIOS data must use byte alignment
+
+// Define offset to location of ROM header.
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER         0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE                0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE         0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE        20    //including the terminator 0x0!
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER      0x002f
+#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START       0x006e
+
+/****************************************************************************/
+// Common header for all tables (Data table, Command table).
+// Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header.
+// And the pointer actually points to this header.
+/****************************************************************************/
+
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+  USHORT usStructureSize;
+  UCHAR  ucTableFormatRevision;   //Change it when the Parser is not backward compatible
+  UCHAR  ucTableContentRevision;  //Change it only when the table needs to change but the firmware
+                                  //Image can't be updated, while Driver needs to carry the new table!
+}ATOM_COMMON_TABLE_HEADER;
+
+/****************************************************************************/
+// Structure stores the ROM header.
+/****************************************************************************/
+typedef struct _ATOM_ROM_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  UCHAR  uaFirmWareSignature[4];    //Signature to distinguish between Atombios and non-atombios,
+                                    //atombios should init it as "ATOM", don't change the position
+  USHORT usBiosRuntimeSegmentAddress;
+  USHORT usProtectedModeInfoOffset;
+  USHORT usConfigFilenameOffset;
+  USHORT usCRC_BlockOffset;
+  USHORT usBIOS_BootupMessageOffset;
+  USHORT usInt10Offset;
+  USHORT usPciBusDevInitCode;
+  USHORT usIoBaseAddress;
+  USHORT usSubsystemVendorID;
+  USHORT usSubsystemID;
+  USHORT usPCI_InfoOffset;
+  USHORT usMasterCommandTableOffset;//Offset for SW to get all command table offsets, Don't change the position
+  USHORT usMasterDataTableOffset;   //Offset for SW to get all data table offsets, Don't change the position
+  UCHAR  ucExtendedFunctionCode;
+  UCHAR  ucReserved;
+}ATOM_ROM_HEADER;
+
+//==============================Command Table Portion====================================
+
+
+/****************************************************************************/
+// Structures used in Command.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+  USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
+  USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
+  USHORT ASIC_RegistersInit;                     //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT VRAM_BlockVenderDetection;              //Atomic Table,  used only by Bios
+  USHORT DIGxEncoderControl;                     //Only used by Bios
+  USHORT MemoryControllerInit;                   //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT EnableCRTCMemReq;                       //Function Table,directly used by various SW components,latest version 2.1
+  USHORT MemoryParamAdjust;                      //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock if needed
+  USHORT DVOEncoderControl;                      //Function Table,directly used by various SW components,latest version 1.2
+  USHORT GPIOPinControl;                         //Atomic Table,  only used by Bios
+  USHORT SetEngineClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetMemoryClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetPixelClock;                          //Function Table,directly used by various SW components,latest version 1.2
+  USHORT EnableDispPowerGating;                  //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT ResetMemoryDLL;                         //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ResetMemoryDevice;                      //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryPLLInit;                          //Atomic Table,  used only by Bios
+  USHORT AdjustDisplayPll;                       //Atomic Table,  used by various SW components.
+  USHORT AdjustMemoryController;                 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT EnableASIC_StaticPwrMgt;                //Atomic Table,  only used by Bios
+  USHORT SetUniphyInstance;                      //Atomic Table,  only used by Bios
+  USHORT DAC_LoadDetection;                      //Atomic Table,  directly used by various SW components,latest version 1.2
+  USHORT LVTMAEncoderControl;                    //Atomic Table,directly used by various SW components,latest version 1.3
+  USHORT HW_Misc_Operation;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT DAC1EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT DAC2EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT DVOOutputControl;                       //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT CV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead
+  USHORT GetConditionalGoldenSetting;            //Only used by Bios
+  USHORT SMC_Init;                               //Function Table,directly used by various SW components,latest version 1.1
+  USHORT PatchMCSetting;                         //only used by BIOS
+  USHORT MC_SEQ_Control;                         //only used by BIOS
+  USHORT Gfx_Harvesting;                         //Atomic Table,  Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting
+  USHORT EnableScaler;                           //Atomic Table,  used only by Bios
+  USHORT BlankCRTC;                              //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT EnableCRTC;                             //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT GetPixelClock;                          //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT EnableVGA_Render;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT GetSCLKOverMCLKRatio;                   //Atomic Table,  only used by Bios
+  USHORT SetCRTC_Timing;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetCRTC_OverScan;                       //Atomic Table,  used by various SW components,latest version 1.1
+  USHORT SetCRTC_Replication;                    //Atomic Table,  used only by Bios
+  USHORT SelectCRTC_Source;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT EnableGraphSurfaces;                    //Atomic Table,  used only by Bios
+  USHORT UpdateCRTC_DoubleBufferRegisters;       //Atomic Table,  used only by Bios
+  USHORT LUT_AutoFill;                           //Atomic Table,  only used by Bios
+  USHORT EnableHW_IconCursor;                    //Atomic Table,  only used by Bios
+  USHORT GetMemoryClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT GetEngineClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetCRTC_UsingDTDTiming;                 //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ExternalEncoderControl;                 //Atomic Table,  directly used by various SW components,latest version 2.1
+  USHORT LVTMAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT VRAM_BlockDetectionByStrap;             //Atomic Table,  used only by Bios
+  USHORT MemoryCleanUp;                          //Atomic Table,  only used by Bios
+  USHORT ProcessI2cChannelTransaction;           //Function Table,only used by Bios
+  USHORT WriteOneByteToHWAssistedI2C;            //Function Table,indirectly used by various SW components
+  USHORT ReadHWAssistedI2CStatus;                //Atomic Table,  indirectly used by various SW components
+  USHORT SpeedFanControl;                        //Function Table,indirectly used by various SW components,called from ASIC_Init
+  USHORT PowerConnectorDetection;                //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT MC_Synchronization;                     //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ComputeMemoryEnginePLL;                 //Atomic Table,  indirectly used by various SW components,called from SetMemory/EngineClock
+  USHORT MemoryRefreshConversion;                //Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock
+  USHORT VRAM_GetCurrentInfoBlock;               //Atomic Table,  used only by Bios
+  USHORT DynamicMemorySettings;                  //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryTraining;                         //Atomic Table,  used only by Bios
+  USHORT EnableSpreadSpectrumOnPPLL;             //Atomic Table,  directly used by various SW components,latest version 1.2
+  USHORT TMDSAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetVoltage;                             //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+  USHORT DAC1OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ReadEfuseValue;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ComputeMemoryClockParam;                //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
+  USHORT ClockSource;                            //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT MemoryDeviceInit;                       //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT GetDispObjectInfo;                      //Atomic Table,  indirectly used by various SW components,called from EnableVGARender
+  USHORT DIG1EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG1TransmitterControl;                 //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2TransmitterControl;                 //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT ProcessAuxChannelTransaction;           //Function Table,only used by Bios
+  USHORT DPEncoderService;                       //Function Table,only used by Bios
+  USHORT GetVoltageInfo;                         //Function Table,only used by Bios since SI
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+// For backward compatibility
+#define ReadEDIDFromHWAssistedI2C                ProcessI2cChannelTransaction
+#define DPTranslatorControl                      DIG2EncoderControl
+#define UNIPHYTransmitterControl                 DIG1TransmitterControl
+#define LVTMATransmitterControl                  DIG2TransmitterControl
+#define SetCRTC_DPM_State                        GetConditionalGoldenSetting
+#define ASIC_StaticPwrMgtStatusChange            SetUniphyInstance
+#define HPDInterruptService                      ReadHWAssistedI2CStatus
+#define EnableVGA_Access                         GetSCLKOverMCLKRatio
+#define EnableYUV                                GetDispObjectInfo
+#define DynamicClockGating                       EnableDispPowerGating
+#define SetupHWAssistedI2CStatus                 ComputeMemoryClockParam
+#define DAC2OutputControl                        ReadEfuseValue
+
+#define TMDSAEncoderControl                      PatchMCSetting
+#define LVDSEncoderControl                       MC_SEQ_Control
+#define LCD1OutputControl                        HW_Misc_Operation
+#define TV1OutputControl                         Gfx_Harvesting
+#define TVEncoderControl                         SMC_Init
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER           sHeader;
+  ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
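+
+// Editor's note: the short usage sketch below is illustrative only and is not
+// part of the original header or of this commit.  Each USHORT in the master
+// list above effectively holds the ROM offset of one command table (0 when the
+// table is absent), so a driver derives a table index from a field's position
+// and hands it to the ATOM interpreter.  GetIndexIntoMasterTable() is defined
+// elsewhere in this header; amdgpu_atom_execute_table() and the atom_context
+// pointer "ctx" are driver-side assumptions.
+//
+//     int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+//     /* ... fill the parameter/PS block expected by that table ... */
+//     amdgpu_atom_execute_table(ctx, index, (uint32_t *)&args);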
+
+/****************************************************************************/
+// Structures used in every command table
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
+#if ATOM_BIG_ENDIAN
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+#else
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+#endif
+}ATOM_TABLE_ATTRIBUTE;
+
+/****************************************************************************/
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
+// And the pointer actually points to this header.
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER CommonHeader;
+  ATOM_TABLE_ATTRIBUTE     TableAttribute;
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+
+/****************************************************************************/
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/
+
+#define COMPUTE_MEMORY_PLL_PARAM        1
+#define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+  ULONG   ulClock;        //On return, it's the re-calculated clock based on the given Fb_div, Post_Div and ref_div
+  UCHAR   ucAction;       //0:reserved //1:Memory //2:Engine
+  UCHAR   ucReserved;     //may expand to return larger Fbdiv later
+  UCHAR   ucFbDiv;        //return value
+  UCHAR   ucPostDiv;      //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+  ULONG   ulClock;        //On return, [23:0] holds the real clock
+  UCHAR   ucAction;       //0:reserved; COMPUTE_MEMORY_PLL_PARAM:Memory; COMPUTE_ENGINE_PLL_PARAM:Engine. It returns ref_div to be written to the register
+  USHORT  usFbDiv;        //return Feedback value to be written to the register
+  UCHAR   ucPostDiv;      //return post div to be written to the register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+#define SET_CLOCK_FREQ_MASK                       0x00FFFFFF  //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK                    0x01000000  //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK              0x02000000   //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE     0x04000000  //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK                   0x08000000   //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL                       0x10000000   //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+#define USE_SS_ENABLED_PIXEL_CLOCK                USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK                  0x01       //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH                 0x02        //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04       //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK                 0x08       //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL                     0x10       //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+#define b3DRAM_SELF_REFRESH_EXIT                  0x20       //Applicable to DRAM self refresh exit only. when set, it means it will go to program DRAM self refresh exit path
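+
+// Editor's note: illustrative sketch, not part of the original header.  The
+// requested clock keeps bits [23:0] for the frequency and carries the flags
+// above in the top byte; for example, a memory clock change that holds DRAM
+// in self refresh during the transition could be composed as follows (the
+// variable names are assumptions, units are 10 kHz as elsewhere here):
+//
+//     ULONG req = (target_mclk_10khz & SET_CLOCK_FREQ_MASK) |
+//                 USE_MEMORY_SELF_REFRESH_MASK;
+//     /* req then goes into SET_MEMORY_CLOCK_PARAMETERS.ulTargetMemoryClock,
+//        defined later in this section */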
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+#else
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+#endif
+}ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+  USHORT usFbDivFrac;
+  USHORT usFbDiv;
+}ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter
+  UCHAR   ucPostDiv;                          //Output Parameter
+  UCHAR   ucCntlFlag;                         //Output Parameter
+  UCHAR   ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN          1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE            2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE         4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9                  8
+
+
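+// Editor's note: illustrative sketch, not part of the original header.  In the
+// V3 layout above the anonymous union is the input on entry and the output on
+// return: the caller fills ulClock, the table overwrites it with the feedback
+// divider.  The execute call and "ctx" are driver-side assumptions, as in the
+// earlier sketch.
+//
+//     COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args = {0};
+//     args.ulClock.ulClockFreq        = target_clk_10khz;         /* 10 kHz  */
+//     args.ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;
+//     amdgpu_atom_execute_table(ctx,
+//             GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
+//             (uint32_t *)&args);
+//     /* on return: args.ulFbDiv, args.ucRefDiv and args.ucPostDiv hold the dividers */
+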
+// V4 is only used for APUs where the PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucPostDiv:8;        //return parameter: post divider which is used to program to register directly
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock
+#else
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock
+  ULONG  ucPostDiv:8;        //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter
+  UCHAR   ucPostDiv;                          //Output Parameter
+  union
+  {
+    UCHAR   ucCntlFlag;                       //Output Flags
+    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR   ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+
+typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
+{
+  ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+  ULONG   ulReserved[2];
+}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
+
+//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
+#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK            0x0f
+#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK           0x00
+#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK                     0x01
+
+
+typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
+{
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4  ulClock;         //Output Parameter: ucPostDiv=DFS divider
+  ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter: PLL FB divider
+  UCHAR   ucPllRefDiv;                      //Output Parameter: PLL ref divider
+  UCHAR   ucPllPostDiv;                     //Output Parameter: PLL post divider
+  UCHAR   ucPllCntlFlag;                    //Output Flags: control flag
+  UCHAR   ucReserved;
+}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
+
+//ucPllCntlFlag
+#define SPLL_CNTL_FLAG_VCO_MODE_MASK            0x03
+
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
+// used for ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+  union
+  {
+    ULONG  ulClock;
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output:UPPER_WORD=FB_DIV_INTEGER,  LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+  };
+  UCHAR   ucDllSpeed;                         //Output
+  UCHAR   ucPostDiv;                          //Output
+  union{
+    UCHAR   ucInputFlag;                      //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+    UCHAR   ucPllCntlFlag;                    //Output:
+  };
+  UCHAR   ucBWCntl;
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN          0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK            0x03
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL            0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE               0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE             0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL            0x04
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulMemoryClock;
+  ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+// Structures used by SetEngineClockTable
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetEngineClock;          //In 10Khz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by SetMemoryClockTable
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
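+
+// Editor's note: illustrative sketch, not part of the original header.  A clock
+// change is typically issued by filling the PS allocation above and executing
+// the matching command table; amdgpu_atom_execute_table(), cpu_to_le32() and
+// the atom_context "ctx" are driver-side assumptions.
+//
+//     SET_ENGINE_CLOCK_PS_ALLOCATION args;
+//
+//     args.ulTargetEngineClock = cpu_to_le32(eng_clk_10khz);   /* 10 kHz units */
+//     amdgpu_atom_execute_table(ctx,
+//             GetIndexIntoMasterTable(COMMAND, SetEngineClock),
+//             (uint32_t *)&args);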
+
+/****************************************************************************/
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS
+{
+  ULONG ulDefaultEngineClock;         //In 10Khz unit
+  ULONG ulDefaultMemoryClock;         //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+  ASIC_INIT_PARAMETERS sASICInitClocks;
+  SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+typedef struct _ASIC_INIT_CLOCK_PARAMETERS
+{
+  ULONG ulClkFreqIn10Khz:24;
+  ULONG ucClkFlag:8;
+}ASIC_INIT_CLOCK_PARAMETERS;
+
+typedef struct _ASIC_INIT_PARAMETERS_V1_2
+{
+  ASIC_INIT_CLOCK_PARAMETERS asSclkClock;         //In 10Khz unit
+  ASIC_INIT_CLOCK_PARAMETERS asMemClock;          //In 10Khz unit
+}ASIC_INIT_PARAMETERS_V1_2;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION_V1_2
+{
+  ASIC_INIT_PARAMETERS_V1_2 sASICInitClocks;
+  ULONG ulReserved[8];
+}ASIC_INIT_PS_ALLOCATION_V1_2;
+
+/****************************************************************************/
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define  DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
+{
+  UCHAR ucDispPipeId;                 // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
+
+typedef struct _ENABLE_DISP_POWER_GATING_PS_ALLOCATION
+{
+  UCHAR ucDispPipeId;                 // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;                     // ATOM_ENABLE/ATOM_DISABLE/ATOM_INIT
+  UCHAR ucPadding[2];
+  ULONG ulReserved[4];
+}ENABLE_DISP_POWER_GATING_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+  USHORT usDeviceID;                  //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+  UCHAR  ucDacType;                   //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+  UCHAR  ucMisc;                                 //Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
+
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
+#define DAC_LOAD_MISC_YPrPb                  0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+  DAC_LOAD_DETECTION_PARAMETERS            sDacload;
+  ULONG                                    Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10KHz; for bios convenience
+  UCHAR  ucDacStandard;               // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+                                      // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/
+// Structures used by DIG1EncoderControlTable
+//                    DIG2EncoderControlTable
+//                    ExternalEncoderControlTable
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;      // in 10KHz; for bios convenience
+  UCHAR  ucConfig;
+                            // [2] Link Select:
+                            // =0: PHY linkA if bfLane<3
+                            // =1: PHY linkB if bfLanes<3
+                            // =0: PHY linkA+B if bfLanes=3
+                            // [3] Transmitter Sel
+                            // =0: UNIPHY or PCIEPHY
+                            // =1: LVTMA
+  UCHAR ucAction;           // =0: turn off encoder
+                            // =1: turn on encoder
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder
+                            // =1: LVDS encoder
+                            // =2: DVI  encoder
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION             DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER            DIG_ENCODER_CONTROL_PARAMETERS
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK           0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ        0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ        0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ        0x02
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK             0x04
+#define ATOM_ENCODER_CONFIG_LINKA                     0x00
+#define ATOM_ENCODER_CONFIG_LINKB                     0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B                   ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A                   ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK      0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY                    0x00
+#define ATOM_ENCODER_CONFIG_LVTMA                     0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1              0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2              0x08
+#define ATOM_ENCODER_CONFIG_DIGB                      0x80         // VBIOS Internal use, outside SW should set this bit=0
+// ucAction
+// ATOM_ENABLE:  Enable Encoder
+// ATOM_DISABLE: Disable Encoder
+
+//ucEncoderMode
+#define ATOM_ENCODER_MODE_DP                          0
+#define ATOM_ENCODER_MODE_LVDS                        1
+#define ATOM_ENCODER_MODE_DVI                         2
+#define ATOM_ENCODER_MODE_HDMI                        3
+#define ATOM_ENCODER_MODE_SDVO                        4
+#define ATOM_ENCODER_MODE_DP_AUDIO                    5
+#define ATOM_ENCODER_MODE_TV                          13
+#define ATOM_ENCODER_MODE_CV                          14
+#define ATOM_ENCODER_MODE_CRT                         15
+#define ATOM_ENCODER_MODE_DVO                         16
+#define ATOM_ENCODER_MODE_DP_SST                      ATOM_ENCODER_MODE_DP    // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                      5                       // For DP1.2
+
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:2;
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucReserved:1;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+    UCHAR ucReserved:1;
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucReserved1:2;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V2;
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;      // in 10KHz; for bios convenience
+  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+  UCHAR ucAction;
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder
+                            // =1: LVDS encoder
+                            // =2: DVI  encoder
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucStatus;           // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK            0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ        0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ        0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK              0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA                          0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB                          0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK     0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1                0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2                0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3                0x10
+
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS    0x0e
+#define ATOM_ENCODER_CMD_SETUP                        0x0f
+#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE            0x10
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
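+
+// Editor's note: illustrative sequence, not part of the original header.  For a
+// DP output the encoder control table is usually invoked several times with the
+// ucAction commands above; the rough order below follows standard DP link
+// training, the exact driver flow is an assumption:
+//
+//     ATOM_ENCODER_CMD_SETUP                      /* once, after mode set    */
+//     ATOM_ENCODER_CMD_DP_LINK_TRAINING_START
+//     ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1  /* clock recovery          */
+//     ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2  /* channel equalization    */
+//     ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE
+//     ATOM_ENCODER_CMD_DP_VIDEO_ON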
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
+// For the following table, the ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is also used by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:3;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+    UCHAR ucReserved:3;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK            0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ        0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ        0x01
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL                 0x70
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER                 0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER                 0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER                 0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER                 0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER                 0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER                 0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;      // in 10KHz; for bios convenience
+  ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+  UCHAR ucAction;
+  union{
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder
+                            // =1: LVDS encoder
+                            // =2: DVI  encoder
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;        // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+                            // =0:     external DP
+                            // =0x1:   internal DP2
+                            // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI
+// For the following table, the ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is also used by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:2;
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz    <= Changed compared to previous version
+#else
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, =2: 5.4Ghz    <= Changed compared to previous version
+    UCHAR ucReserved:2;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK            0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ        0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ        0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ        0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ        0x03
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL                 0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER                 0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER                 0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER                 0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER                 0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER                 0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER                 0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER                 0x60
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;      // in 10KHz; for bios convenience
+  union{
+    ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+    UCHAR ucConfig;
+  };
+  UCHAR ucAction;
+  union{
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder
+                            // =1: LVDS encoder
+                            // =2: DVI  encoder
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+                            // =0:     external DP
+                            // =0x1:   internal DP2
+                            // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucHPD_ID;           // HPD ID (1-6); =0 means skip HPD programming. New compared to the previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
+
+// define ucBitPerColor:
+#define PANEL_BPC_UNDEFINE                               0x00
+#define PANEL_6BIT_PER_COLOR                             0x01
+#define PANEL_8BIT_PER_COLOR                             0x02
+#define PANEL_10BIT_PER_COLOR                            0x03
+#define PANEL_12BIT_PER_COLOR                            0x04
+#define PANEL_16BIT_PER_COLOR                            0x05
+
+//define ucPanelMode
+#define DP_PANEL_MODE_EXTERNAL_DP_MODE                   0x00
+#define DP_PANEL_MODE_INTERNAL_DP2_MODE                  0x01
+#define DP_PANEL_MODE_INTERNAL_DP1_MODE                  0x11
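+
+// Editor's note: illustrative sketch, not part of the original header.  A DP
+// setup request through the V4 encoder control parameters might be filled in
+// as below; which DIG encoder control entry of the master list is executed,
+// and the surrounding execute call, are driver-side assumptions.
+//
+//     DIG_ENCODER_CONTROL_PARAMETERS_V4 args = {0};
+//     args.usPixelClock          = cpu_to_le16(pixel_clk_10khz);
+//     args.acConfig.ucDigSel     = dig_instance;             /* DIG0..DIG6  */
+//     args.acConfig.ucDPLinkRate = 1;                        /* 2.70 GHz    */
+//     args.ucAction              = ATOM_ENCODER_CMD_SETUP;
+//     args.ucEncoderMode         = ATOM_ENCODER_MODE_DP;
+//     args.ucLaneNum             = 4;
+//     args.ucBitPerColor         = PANEL_8BIT_PER_COLOR;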
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable
+//                    LVTMATransmitterControlTable
+//                    DVOOutputControlTable
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE
+{
+  UCHAR ucLaneSel;
+  UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+  union
+  {
+    USHORT usPixelClock;      // in 10KHz; for bios convenience
+    USHORT usInitInfo;        // when initializing uniphy, the lower 8 bits hold the connector type defined in ObjectID.h
+    ATOM_DP_VS_MODE asMode;   // DP voltage swing mode
+  };
+  UCHAR ucConfig;
+                            // [0]=0: 4 lane Link,
+                            //    =1: 8 lane Link ( Dual Links TMDS )
+                            // [1]=0: InCoherent mode
+                            //    =1: Coherent Mode
+                            // [2] Link Select:
+                            //    =0: PHY linkA   if bfLane<3
+                            //    =1: PHY linkB   if bfLanes<3
+                            //    =0: PHY linkA+B if bfLanes=3
+                            // [5:4] PCIE lane Sel
+                            //    =0: lane 0~3 or 0~7
+                            //    =1: lane 4~7
+                            //    =2: lane 8~11 or 8~15
+                            //    =3: lane 12~15
+  UCHAR ucAction;           // =0: turn off encoder
+                            // =1: turn on encoder
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION      DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK   0x00ff
+
+//ucConfig
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK         0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT            0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK      0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA                  0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB                  0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B               0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A               0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK   0x08         // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER      0x00            // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER      0x08            // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK         0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL         0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE         0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN      0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK      0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3            0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7            0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7            0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11            0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15            0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15         0xc0
+
+//ucAction
+#define ATOM_TRANSMITTER_ACTION_DISABLE                      0
+#define ATOM_TRANSMITTER_ACTION_ENABLE                      1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF                   2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON                   3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL  4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START       5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP          6
+#define ATOM_TRANSMITTER_ACTION_INIT                         7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT          8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT             9
+#define ATOM_TRANSMITTER_ACTION_SETUP                         10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH           11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON               12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF              13
+
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucReserved:1;
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: None DP connector
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: None DP connector
+  UCHAR ucReserved:1;
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR         0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT                      0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK              0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA                       0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB                        0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK           0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER                0x00            // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER                0x08            // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+
+// Bit4
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR                 0x10
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1              0x00   //AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2              0x40   //CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3              0x80   //EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+  union
+  {
+    USHORT usPixelClock;      // in 10KHz; for bios convenience
+    USHORT usInitInfo;        // when initializing uniphy, the lower 8 bits hold the connector type defined in ObjectID.h
+    ATOM_DP_VS_MODE asMode;   // DP voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+  UCHAR ucAction;             // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+  union
+  {
+    USHORT usPixelClock;      // in 10KHz; for bios convenience
+    USHORT usInitInfo;        // when initializing uniphy, the lower 8 bits hold the connector type defined in ObjectID.h
+    ATOM_DP_VS_MODE asMode;   // DP voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+  UCHAR ucAction;             // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR         0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT                      0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK              0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA                       0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB                        0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK           0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER                0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER                0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK            0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL                        0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL                        0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT            0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1              0x00   //AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2              0x40   //CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3              0x80   //EF
+
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+  union
+  {
+    UCHAR ucLaneSet;
+    struct {
+#if ATOM_BIG_ENDIAN
+      UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+      UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+      UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+#else
+      UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+      UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+      UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+#endif
+    };
+  };
+}ATOM_DP_VS_MODE_V4;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;       // in 10KHz; for bios convenience
+    USHORT usInitInfo;         // when initializing uniphy, the lower 8 bits hold the connector type defined in ObjectID.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode     Redefined compared to the previous version
+  };
+  union
+  {
+    ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+    UCHAR ucConfig;
+  };
+  UCHAR ucAction;              // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR         0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT                      0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK              0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA                       0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB                        0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK           0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER                0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER                0x08
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK            0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL                       0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL                      0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL                      0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1              0x00   //AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2              0x40   //CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3              0x80   //EF
+
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReservd1:1;
+  UCHAR ucHPDSel:3;
+  UCHAR ucPhyClkSrcId:2;
+  UCHAR ucCoherentMode:1;
+  UCHAR ucReserved:1;
+#else
+  UCHAR ucReserved:1;
+  UCHAR ucCoherentMode:1;
+  UCHAR ucPhyClkSrcId:2;
+  UCHAR ucHPDSel:3;
+  UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+  USHORT usSymClock;              // Encoder clock in 10kHz: (DP mode) = link clock/10; (TMDS/LVDS/HDMI) = pixel clock; (HDMI deep color) = pixel clock * deep_color_ratio
+  UCHAR  ucPhyId;                 // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF
+  UCHAR  ucAction;                // defined as ATOM_TRANSMITTER_ACTION_xxx
+  UCHAR  ucLaneNum;                 // indicate lane number 1-8
+  UCHAR  ucConnObjId;               // Connector Object Id defined in ObjectId.h
+  UCHAR  ucDigMode;                 // indicate DIG mode
+  union{
+    ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+    UCHAR ucConfig;
+  };
+  UCHAR  ucDigEncoderSel;           // indicate DIG front end encoder
+  UCHAR  ucDPLaneSet;
+  UCHAR  ucReserved;
+  UCHAR  ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA                                 0
+#define ATOM_PHY_ID_UNIPHYB                                 1
+#define ATOM_PHY_ID_UNIPHYC                                 2
+#define ATOM_PHY_ID_UNIPHYD                                 3
+#define ATOM_PHY_ID_UNIPHYE                                 4
+#define ATOM_PHY_ID_UNIPHYF                                 5
+#define ATOM_PHY_ID_UNIPHYG                                 6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL                       0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL                       0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL                       0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL                       0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL                       0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL                       0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL                       0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP                      0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS                    1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI                     2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI                    3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO                    4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST                  5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V                               0x00
+#define DP_LANE_SET__0DB_0_6V                               0x01
+#define DP_LANE_SET__0DB_0_8V                               0x02
+#define DP_LANE_SET__0DB_1_2V                               0x03
+#define DP_LANE_SET__3_5DB_0_4V                             0x08
+#define DP_LANE_SET__3_5DB_0_6V                             0x09
+#define DP_LANE_SET__3_5DB_0_8V                             0x0a
+#define DP_LANE_SET__6DB_0_4V                               0x10
+#define DP_LANE_SET__6DB_0_6V                               0x11
+#define DP_LANE_SET__9_5DB_0_4V                             0x18
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT                      0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK            0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT          0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL                       0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL                      0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL                      0x08
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT           0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK                0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT            0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL                    0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL                      0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL                      0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL                      0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL                      0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL                      0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL                      0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5            DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
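+
+// Editor's note: illustrative sketch, not part of the original header.  With the
+// V1.5 layout the transmitter is addressed by PHY id rather than by the older
+// link/transmitter select bits; a DP enable might be filled in roughly as below
+// (the exact values and the surrounding execute call are assumptions).
+//
+//     DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 args = {0};
+//     args.usSymClock      = cpu_to_le16(dp_link_clk_10khz / 10); /* DP: link clock / 10 */
+//     args.ucPhyId         = ATOM_PHY_ID_UNIPHYA;
+//     args.ucAction        = ATOM_TRANSMITTER_ACTION_ENABLE;
+//     args.ucLaneNum       = 4;
+//     args.ucConnObjId     = connector_object_id;                 /* from ObjectID.h */
+//     args.ucDigMode       = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+//     args.ucDigEncoderSel = ATOM_TRANMSITTER_V5__DIGA_SEL;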
+
+
+/****************************************************************************/
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+  USHORT usPixelClock;      // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
+  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;          // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucAction;          //
+  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucBitPerColor;     // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+  UCHAR  ucReserved;
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERANL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
+#define EXTERANL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP              0x14
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK            0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ        0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ        0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ        0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MAKS          0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1                  0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2                  0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3                  0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
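A hedged sketch of filling the V3 external-encoder request for an ENCODER_SETUP action; ATOM_ENCODER_MODE_DP is assumed to be the mode define from earlier in this header, and the clock/lane values are placeholders.

static void example_ext_encoder_setup(EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *p)
{
  p->usPixelClock  = 14850;                                   /* 148.50 MHz in 10 kHz units */
  p->ucAction      = EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP;
  p->ucEncoderMode = ATOM_ENCODER_MODE_DP;                    /* assumed from earlier in this header */
  p->ucLaneNum     = 4;
  p->ucBitPerColor = 8;                                       /* only meaningful for DP */
  p->ucReserved    = 0;
  p->ucConfig      = EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 |
                     EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
}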
+
+
+/****************************************************************************/
+// Structures used by DAC1OutputControlTable
+//                    DAC2OutputControlTable
+//                    LVTMAOutputControlTable  (Before DEC30)
+//                    TMDSAOutputControlTable  (Before DEC30)
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+  UCHAR  ucAction;                    // Possible input: ATOM_ENABLE || ATOM_DISABLE
+                                      // When the display is LCD, in addition to above:
+                                      // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
+                                      // ATOM_LCD_SELFTEST_STOP
+
+  UCHAR  aucPadding[3];               // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3   DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+
+typedef struct _LVTMA_OUTPUT_CONTROL_PARAMETERS_V2
+{
+  // Possible value of ucAction
+  // ATOM_TRANSMITTER_ACTION_LCD_BLON
+  // ATOM_TRANSMITTER_ACTION_LCD_BLOFF
+  // ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL
+  // ATOM_TRANSMITTER_ACTION_POWER_ON
+  // ATOM_TRANSMITTER_ACTION_POWER_OFF
+  UCHAR  ucAction;
+  UCHAR  ucBriLevel;
+  USHORT usPwmFreq;                  // in unit of Hz, 200 means 200Hz
+}LVTMA_OUTPUT_CONTROL_PARAMETERS_V2;
+
+
+
+/****************************************************************************/
+// Structures used by BlankCRTCTable
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+  UCHAR  ucCRTC;                       // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucBlanking;                  // ATOM_BLANKING or ATOM_BLANKINGOFF
+  USHORT usBlackColorRCr;
+  USHORT usBlackColorGY;
+  USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION    BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/
+// Structures used by EnableCRTCTable
+//                    EnableCRTCMemReqTable
+//                    UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+  UCHAR ucCRTC;                         // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION   ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+  USHORT usOverscanRight;             // right
+  USHORT usOverscanLeft;              // left
+  USHORT usOverscanBottom;            // bottom
+  USHORT usOverscanTop;               // top
+  UCHAR  ucCRTC;                      // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION  SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+  UCHAR ucH_Replication;              // horizontal replication
+  UCHAR ucV_Replication;              // vertical replication
+  UCHAR usCRTC;                       // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION  SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+  UCHAR ucCRTC;                         // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucDevice;                     // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+  UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION  SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+  UCHAR ucCRTC;                         // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEncoderID;                  // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+  UCHAR ucEncodeMode;                           // Encoding mode, only valid when using DIG1/DIG2/DVO
+  UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID                      0x00
+//#define ASIC_INT_TV_ENCODER_ID                           0x02
+//#define ASIC_INT_DIG1_ENCODER_ID                        0x03
+//#define ASIC_INT_DAC2_ENCODER_ID                        0x04
+//#define ASIC_EXT_TV_ENCODER_ID                           0x06
+//#define ASIC_INT_DVO_ENCODER_ID                           0x07
+//#define ASIC_INT_DIG2_ENCODER_ID                        0x09
+//#define ASIC_EXT_DIG_ENCODER_ID                           0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP                              0
+//#define ATOM_ENCODER_MODE_LVDS                           1
+//#define ATOM_ENCODER_MODE_DVI                              2
+//#define ATOM_ENCODER_MODE_HDMI                           3
+//#define ATOM_ENCODER_MODE_SDVO                           4
+//#define ATOM_ENCODER_MODE_TV                              13
+//#define ATOM_ENCODER_MODE_CV                              14
+//#define ATOM_ENCODER_MODE_CRT                              15
+
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V3
+{
+  UCHAR ucCRTC;                         // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEncoderID;                    // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+  UCHAR ucEncodeMode;                   // Encoding mode, only valid when using DIG1/DIG2/DVO
+  UCHAR ucDstBpc;                       // PANEL_6/8/10/12BIT_PER_COLOR
+}SELECT_CRTC_SOURCE_PARAMETERS_V3;
+
+
+/****************************************************************************/
+// Structures used by SetPixelClockTable
+//                    GetPixelClockTable
+/****************************************************************************/
+//Major revision=1., Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1., Minor revision=2, add ucMiscInfo
+//ucMiscInfo:
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK        0xF0
+#define MISC_DEVICE_INDEX_SHIFT       4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+  USHORT usPixelClock;                // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucMiscInfo;                  // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
+//Major revision=1., Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV                                          13
+//ATOM_ENCODER_MODE_CV                                          14
+//ATOM_ENCODER_MODE_CRT                                          15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL                     0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED                  0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED                  0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL                  0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT                     0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT                  0x04
+//#define DVO_ENCODER_CONFIG_24BIT                        0x08
+
+//ucMiscInfo: also changed, see below
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL                  0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE                              0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK                     0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1                     0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2                     0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK         0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC                    0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+  USHORT usPixelClock;                // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0.
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucTransmitterId;             // graphic encoder id defined in objectId.h
+   union
+   {
+  UCHAR  ucEncoderMode;               // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+   UCHAR  ucDVOConfig;                           // when use DVO, need to know SDR/DDR, 12bit or 24bit
+   };
+  UCHAR  ucMiscInfo;                  // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+                                      // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+                                      // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST                     PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION                  PIXEL_CLOCK_PARAMETERS_LAST
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+  UCHAR  ucCRTC;             // ATOM_CRTC1~6, indicate the CRTC controller to
+                             // drive the pixel clock. not used for DCPLL case.
+  union{
+  UCHAR  ucReserved;
+  UCHAR  ucFracFbDiv;        // [gphan] temporary to prevent build problem.  remove it after driver code is changed.
+  };
+  USHORT usPixelClock;       // target the pixel clock to drive the CRTC timing
+                             // 0 means disable PPLL/DCPLL.
+  USHORT usFbDiv;            // feedback divider integer part.
+  UCHAR  ucPostDiv;          // post divider.
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h,
+                             // indicate which graphic encoder will be used.
+  UCHAR  ucEncoderMode;      // Encoder mode:
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL
+                             // bit[1]= when VGA timing is used.
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL.
+                             // =0: XTALIN (default mode)
+                              // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depend on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL               0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE                        0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP              0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
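A sketch of how a caller might fill PIXEL_CLOCK_PARAMETERS_V5 using the misc bits above; the divider values are arbitrary placeholders, and ATOM_CRTC1, ATOM_PPLL1 and ATOM_ENCODER_MODE_HDMI are assumed to be the defines from earlier in this header.

static void example_fill_pixel_clock_v5(PIXEL_CLOCK_PARAMETERS_V5 *p)
{
  p->ucCRTC          = ATOM_CRTC1;             /* assumed define from earlier in this header */
  p->ucReserved      = 0;
  p->usPixelClock    = 14850;                  /* 148.50 MHz in 10 kHz units */
  p->usFbDiv         = 61;                     /* placeholder integer feedback divider */
  p->ulFbDivDecFrac  = 500000;                 /* placeholder .500000 fractional part */
  p->ucPostDiv       = 4;                      /* placeholder */
  p->ucRefDiv        = 1;                      /* placeholder */
  p->ucPpll          = ATOM_PPLL1;             /* assumed define from earlier in this header */
  p->ucTransmitterID = 0;                      /* encoder object id from ObjectID.h */
  p->ucEncoderMode   = ATOM_ENCODER_MODE_HDMI; /* assumed define from earlier in this header */
  p->ucMiscInfo      = PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL |
                       PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
}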
+
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to
+                              // drive the pixel clock. not used for DCPLL case.
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing.
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing.
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to
+                              // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
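Because the bitfield order is swapped under ATOM_BIG_ENDIAN, assigning the named members (rather than hand-packing a ULONG) produces the same layout on either endianness; a minimal sketch, illustrative only:

static void example_fill_crtc_pclk(CRTC_PIXEL_CLOCK_FREQ *f,
                                   unsigned long pclk_10khz, unsigned char crtc)
{
  f->ulPixelClock = pclk_10khz & 0xffffff;   /* 24-bit pixel clock in 10 kHz units */
  f->ucCRTC       = crtc;                    /* ATOM_CRTC1..6 */
}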
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+  union{
+    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency
+    ULONG ulDispEngClkFreq;                  // dispclk frequency
+  };
+  USHORT usFbDiv;            // feedback divider integer part.
+  UCHAR  ucPostDiv;          // post divider.
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h,
+                             // indicate which graphic encoder will be used.
+  UCHAR  ucEncoderMode;      // Encoder mode:
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL
+                             // bit[1]= when VGA timing is used.
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL.
+                             // =0: XTALIN (default mode)
+                              // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depend on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL               0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE                        0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct definition for 30bpp should be 1 for 30bpp(5:4)
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
+#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK            0x40
+#define PIXEL_CLOCK_V6_MISC_DPREFCLK_BYPASS         0x40
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+  PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+  UCHAR  ucStatus;
+  UCHAR  ucRefDivSrc;                 // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+  UCHAR  ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+  PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+
+/****************************************************************************/
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
+   USHORT usPixelClock;
+   UCHAR ucTransmitterID;
+   UCHAR ucEncodeMode;
+   union
+   {
+      UCHAR ucDVOConfig;                           //if DVO, need to pass the link rate and whether the output is low 12 bit or 24 bit
+      UCHAR ucConfig;                              //if not DVO, not defined yet
+   };
+   UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE            0x10
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION              ADJUST_DISPLAY_PLL_PARAMETERS
+
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+   USHORT usPixelClock;                    // target pixel clock
+   UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
+   UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+  UCHAR ucDispPllConfig;                 // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
+  UCHAR ucExtTransmitterID;               // external encoder id.
+   UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// ucDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL                0x0001     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED               0x0000     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED               0x0001     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL              0x000c     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT                0x0000     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT              0x0004     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT                   0x0008     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE                   0x0010     // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE               0x0020     // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK                   0x0040     // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+  ULONG ulDispPllFreq;                 // return display PPLL freq which is used to generate the pixclock, and related idclk, symclk etc
+  UCHAR ucRefDiv;                      // if non-zero, it is used to calculate the other PPLL parameters fb_divider and post_div (if they are not given)
+  UCHAR ucPostDiv;                     // if non-zero, it is used to calculate the other PPLL parameter fb_divider
+  UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+  union
+  {
+    ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3  sInput;
+    ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+  };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
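The allocation above is an in/out union: the caller fills sInput, runs AdjustDisplayPllTable through the driver's atom interpreter, then reads sOutput back from the same memory. A hedged sketch (the execute step is driver-specific and omitted; the values are placeholders, and ATOM_ENCODER_MODE_DVI is assumed from earlier in this header):

static unsigned long example_adjust_display_pll_v3(void)
{
  ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 args;

  args.sInput.usPixelClock       = 26970;  /* 269.70 MHz in 10 kHz units (dual-link DVI) */
  args.sInput.ucTransmitterID    = 0;      /* encoder object id from ObjectID.h */
  args.sInput.ucEncodeMode       = ATOM_ENCODER_MODE_DVI;
  args.sInput.ucDispPllConfig    = DISPPLL_CONFIG_COHERENT_MODE |
                                   DISPPLL_CONFIG_DUAL_LINK;
  args.sInput.ucExtTransmitterID = 0;
  args.sInput.ucReserved[0]      = 0;
  args.sInput.ucReserved[1]      = 0;

  /* ...execute AdjustDisplayPllTable here (driver-specific, not shown)... */

  return args.sOutput.ulDispPllFreq;       /* adjusted PPLL frequency for this mode */
}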
+
+/****************************************************************************/
+// Structures used by EnableYUVTable
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+  UCHAR ucCRTC;                       // Which CRTC needs this YUV or RGB format
+  UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/
+// Structures used by GetMemoryClockTable
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulReturnMemoryClock;          // current memory speed in 10KHz unit
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION  GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/
+// Structures used by GetEngineClockTable
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulReturnEngineClock;          // current engine speed in 10KHz unit
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION  GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/
+// Following Structures and constant may be obsolete
+/****************************************************************************/
+//Maximum 8 bytes, the data read in will be placed in the parameter space.
+//Read operation is successful when the parameter space is non-zero, otherwise the read operation failed
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usVRAMAddress;      //Address in frame buffer where to place the raw EDID
+  USHORT    usStatus;           //When used as output: lower byte EDID checksum, high byte hardware status
+                                //When used as input:  lower byte is 'bytes to read', currently limited to 128 bytes or 1 byte
+  UCHAR     ucSlaveAddr;        //Read from which slave
+  UCHAR     ucLineNumber;       //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
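A sketch of preparing an EDID read request with the structure above; usStatus doubles as the byte count on input, and the prescale/line values are placeholders.

static void example_fill_edid_read(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *p)
{
  p->usPrescale    = 50;      /* placeholder engine-clock : I2C-clock ratio */
  p->usVRAMAddress = 0;       /* frame-buffer offset that receives the raw EDID */
  p->usStatus      = 128;     /* on input: number of bytes to read (128 or 1) */
  p->ucSlaveAddr   = 0xA0;    /* typical EDID EEPROM slave address */
  p->ucLineNumber  = 0;       /* which HW-assisted I2C line to use */
}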
+
+
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
+#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usByteOffset;       //Write to which byte
+                                //Upper portion of usByteOffset is Format of data
+                                //1bytePS+offsetPS
+                                //2bytesPS+offsetPS
+                                //blockID+offsetPS
+                                //blockID+offsetID
+                                //blockID+counterID+offsetID
+  UCHAR     ucData;             //PS data1
+  UCHAR     ucStatus;           //Status byte 1=success, 2=failure, Also is used as PS data2
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+/****************************************************************************/
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/
+typedef struct   _POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value 0: detected, 1:not detected
+   UCHAR   ucPwrBehaviorId;
+   USHORT   usPwrBudget;                         //power budget the board currently boots to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value 0: detected, 1:not detected
+   UCHAR   ucReserved;
+   USHORT   usPwrBudget;                         //power budget the board currently boots to, in units of watts
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/
+typedef struct   _ENABLE_LVDS_SS_PARAMETERS
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+  UCHAR   ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct   _ENABLE_LVDS_SS_PARAMETERS_V2
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct   _ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           // Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPpll;                                      // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           // Bit[0]: 0-Down Spread,1-Center Spread.
+                                        // Bit[1]: 1-Ext. 0-Int.
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;         // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+  USHORT  usSpreadSpectrumStep;           // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL            0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
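A sketch of composing the V2 spread-spectrum amount word from its two sub-fields using the masks and shifts above (illustrative helper, not taken from the driver):

static inline unsigned short example_ss_amount_v2(unsigned int fbdiv_amount,
                                                  unsigned int nfrac_slip)
{
  return (unsigned short)
         (((fbdiv_amount << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
            ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
          ((nfrac_slip   << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
            ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK));
}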
+
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
+  UCHAR   ucSpreadSpectrumType;           // Bit[0]: 0-Down Spread,1-Center Spread.
+                                        // Bit[1]: 1-Ext. 0-Int.
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;         // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+  USHORT  usSpreadSpectrumStep;           // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+
+
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL            ATOM_PPLL_SS_TYPE_V3_DCPLL
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+  PIXEL_CLOCK_PARAMETERS sPCLKInput;
+  ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct   _MEMORY_TRAINING_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+
+typedef struct   _MEMORY_TRAINING_PARAMETERS_V1_2
+{
+  USHORT usMemTrainingMode;
+  USHORT usReserved;
+}MEMORY_TRAINING_PARAMETERS_V1_2;
+
+//usMemTrainingMode
+#define NORMAL_MEMORY_TRAINING_MODE       0
+#define ENTER_DRAM_SELFREFRESH_MODE       1
+#define EXIT_DRAM_SELFRESH_MODE           2
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+
+/****************************************************************************/
+// Structures used by LVDSEncoderControlTable   (Before DEC30)
+//                    LVTMAEncoderControlTable  (Before DEC30)
+//                    TMDSAEncoderControlTable  (Before DEC30)
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;  // in 10KHz; for bios convenient
+  UCHAR  ucMisc;        // bit0=0: Enable single link
+                        //     =1: Enable dual link
+                        // Bit1=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;  // in 10KHz; for bios convenient
+  UCHAR  ucMisc;        // see PANEL_ENCODER_MISC_xx definitions below
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+  UCHAR  ucTruncate;    // bit0=0: Disable truncate
+                        //     =1: Enable truncate
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucSpatial;     // bit0=0: Disable spatial dithering
+                        //     =1: Enable spatial dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucTemporal;    // bit0=0: Disable temporal dithering
+                        //     =1: Enable temporal dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+                        // bit5=0: Gray level 2
+                        //     =1: Gray level 4
+  UCHAR  ucFRC;         // bit4=0: 25FRC_SEL pattern E
+                        //     =1: 25FRC_SEL pattern F
+                        // bit6:5=0: 50FRC_SEL pattern A
+                        //       =1: 50FRC_SEL pattern B
+                        //       =2: 50FRC_SEL pattern C
+                        //       =3: 50FRC_SEL pattern D
+                        // bit7=0: 75FRC_SEL pattern E
+                        //     =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2    LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2    TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3  LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{
+  UCHAR    ucEnable;            // Enable or Disable External TMDS encoder
+  UCHAR    ucMisc;              // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+  UCHAR    ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+  DIG_ENCODER_CONTROL_PARAMETERS            sDigEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/
+//ucTableFormatRevision=1,ucTableContentRevision=3
+//ucDVOConfig:
+#define DVO_ENCODER_CONFIG_RATE_SEL                     0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED                  0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED                  0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL                  0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT                     0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT                  0x04
+#define DVO_ENCODER_CONFIG_24BIT                        0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;
+  UCHAR  ucDVOConfig;
+  UCHAR  ucAction;                                          //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR  ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3   DVO_ENCODER_CONTROL_PARAMETERS_V3
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+{
+  USHORT usPixelClock;
+  UCHAR  ucDVOConfig;
+  UCHAR  ucAction;                                          //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR  ucBitPerColor;                       //please refer to definition of PANEL_xBIT_PER_COLOR
+  UCHAR  ucReseved[3];
+}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4   DVO_ENCODER_CONTROL_PARAMETERS_V1_4
+
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3 structure is not changed but ucMisc adds bit1 as another input for
+// bit1=0: non-coherent mode
+//     =1: coherent mode
+
+//==========================================================================================
+//Only change is here next time when changing encoder parameter definitions again!
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST     LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST  LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST      DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST   DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+//==========================================================================================
+#define PANEL_ENCODER_MISC_DUAL                0x01
+#define PANEL_ENCODER_MISC_COHERENT            0x02
+#define   PANEL_ENCODER_MISC_TMDS_LINKB                0x04
+#define   PANEL_ENCODER_MISC_HDMI_TYPE                0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE           ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE            ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ       (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN              0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH           0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN        0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH     0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN       0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH    0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4         0x20
+#define PANEL_ENCODER_25FRC_MASK               0x10
+#define PANEL_ENCODER_25FRC_E                  0x00
+#define PANEL_ENCODER_25FRC_F                  0x10
+#define PANEL_ENCODER_50FRC_MASK               0x60
+#define PANEL_ENCODER_50FRC_A                  0x00
+#define PANEL_ENCODER_50FRC_B                  0x20
+#define PANEL_ENCODER_50FRC_C                  0x40
+#define PANEL_ENCODER_50FRC_D                  0x60
+#define PANEL_ENCODER_75FRC_MASK               0x80
+#define PANEL_ENCODER_75FRC_E                  0x00
+#define PANEL_ENCODER_75FRC_F                  0x80
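A sketch of filling LVDS_ENCODER_CONTROL_PARAMETERS_V2 with the dithering/FRC constants above for a dual-link 888RGB panel; the pixel clock and pattern choices are placeholders.

static void example_fill_lvds_encoder_v2(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *p)
{
  p->usPixelClock = 10800;                          /* 108.00 MHz in 10 kHz units */
  p->ucMisc       = PANEL_ENCODER_MISC_DUAL;        /* dual-link panel */
  p->ucAction     = PANEL_ENCODER_ACTION_ENABLE;    /* setup and turn on */
  p->ucTruncate   = 0;                              /* no truncation */
  p->ucSpatial    = PANEL_ENCODER_SPATIAL_DITHER_EN |
                    PANEL_ENCODER_SPATIAL_DITHER_DEPTH;   /* spatial dither, 888RGB */
  p->ucTemporal   = PANEL_ENCODER_TEMPORAL_DITHER_EN |
                    PANEL_ENCODER_TEMPORAL_DITHER_DEPTH | /* temporal dither, 888RGB */
                    PANEL_ENCODER_TEMPORAL_LEVEL_4;       /* gray level 4 */
  p->ucFRC        = PANEL_ENCODER_50FRC_B | PANEL_ENCODER_75FRC_F;
}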
+
+/****************************************************************************/
+// Structures used by SetVoltageTable
+/****************************************************************************/
+#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
+#define SET_VOLTAGE_INIT_MODE                  5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE            6               //Gets the Max. voltage for the soldered Asic
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4
+
+#define   SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
+#define   SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
+#define   SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2
+
+typedef struct   _SET_VOLTAGE_PARAMETERS
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // To set all, to set source A or source B or ...
+  UCHAR    ucVoltageIndex;              // An index to tell which voltage level
+  UCHAR    ucReserved;
+}SET_VOLTAGE_PARAMETERS;
+
+typedef struct   _SET_VOLTAGE_PARAMETERS_V2
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // Not used, may be used by a state machine for different power modes
+  USHORT   usVoltageLevel;              // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
+
+// used by both SetVoltageTable v1.3 and v1.4
+typedef struct   _SET_VOLTAGE_PARAMETERS_V1_3
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Indicate action: Set voltage level
+  USHORT   usVoltageLevel;              // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC                    1
+#define VOLTAGE_TYPE_MVDDC                   2
+#define VOLTAGE_TYPE_MVDDQ                   3
+#define VOLTAGE_TYPE_VDDCI                   4
+#define VOLTAGE_TYPE_VDDGFX                  5
+#define VOLTAGE_TYPE_PCC                     6
+
+#define VOLTAGE_TYPE_GENERIC_I2C_1           0x11
+#define VOLTAGE_TYPE_GENERIC_I2C_2           0x12
+#define VOLTAGE_TYPE_GENERIC_I2C_3           0x13
+#define VOLTAGE_TYPE_GENERIC_I2C_4           0x14
+#define VOLTAGE_TYPE_GENERIC_I2C_5           0x15
+#define VOLTAGE_TYPE_GENERIC_I2C_6           0x16
+#define VOLTAGE_TYPE_GENERIC_I2C_7           0x17
+#define VOLTAGE_TYPE_GENERIC_I2C_8           0x18
+#define VOLTAGE_TYPE_GENERIC_I2C_9           0x19
+#define VOLTAGE_TYPE_GENERIC_I2C_10          0x1A
+
+//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
+#define ATOM_SET_VOLTAGE                     0        //Set voltage Level
+#define ATOM_INIT_VOLTAGE_REGULATOR          3        //Init Regulator
+#define ATOM_SET_VOLTAGE_PHASE               4        //Set Vregulator Phase, only for SVID/PVID regulator
+#define ATOM_GET_MAX_VOLTAGE                 6        //Get Max Voltage, not used from SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL               6        //Get Voltage level from virtual voltage ID, not used for SetVoltage v1.4
+#define ATOM_GET_LEAKAGE_ID                  8        //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4
+
+// define virtual voltage id in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1             0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2             0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3             0xff04
+#define ATOM_VIRTUAL_VOLTAGE_ID4             0xff05
+#define ATOM_VIRTUAL_VOLTAGE_ID5             0xff06
+#define ATOM_VIRTUAL_VOLTAGE_ID6             0xff07
+#define ATOM_VIRTUAL_VOLTAGE_ID7             0xff08
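A sketch of a v1.3 SetVoltage request: a real level is passed in millivolts, while the ATOM_VIRTUAL_VOLTAGE_IDx values above go into usVoltageLevel instead when a leakage/EVV-derived level has to be resolved through GetVoltageInfoTable. The 1100 mV value is a placeholder.

static void example_fill_set_voltage_v1_3(SET_VOLTAGE_PARAMETERS_V1_3 *p)
{
  p->ucVoltageType  = VOLTAGE_TYPE_VDDC;
  p->ucVoltageMode  = ATOM_SET_VOLTAGE;   /* program a level, not a phase */
  p->usVoltageLevel = 1100;               /* 1.100 V, in mV (placeholder) */
}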
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+  SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
+
+// New Added from SI for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+  ULONG    ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct  _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  ULONG    ulVotlageGpioState;
+  ULONG    ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct  _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  USHORT   usVoltageLevel;
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define ATOM_GET_VOLTAGE_VID                0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
+#define ATOM_GET_VOLTAGE_SVID2              0x07        //Get SVI2 Regulator Info
+
+// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
+#define   ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
+#define   ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+
+#define   ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define   ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
+
+
+// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
+  ULONG    ulSCLKFreq;                  // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
+
+// New in GetVoltageInfo v1.2 ucVoltageMode
+#define ATOM_GET_VOLTAGE_EVV_VOLTAGE        0x09
+
+// New Added from CI Hawaii for EVV feature
+typedef struct  _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
+{
+  USHORT   usVoltageLevel;                               // real voltage level in unit of mv
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  USHORT   usTDP_Current;                                // TDP_Current in unit of  0.01A
+  USHORT   usTDP_Power;                                  // TDP_Power in unit of 0.1W
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
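A sketch of an EVV query: the v1.2 input is filled with the virtual/leakage id and the SCLK of the DPM state, the table is executed, and the same parameter space is then read back as GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2. The SCLK value is a placeholder and its unit follows the PPTable entry it comes from.

static void example_fill_evv_query(GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 *in)
{
  in->ucVoltageType  = VOLTAGE_TYPE_VDDC;
  in->ucVoltageMode  = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
  in->usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;  /* virtual/leakage id to resolve */
  in->ulSCLKFreq     = 80000;                     /* placeholder DPM-state SCLK */
}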
+
+/****************************************************************************/
+// Structures used by TVEncoderControlTable
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10KHz; for bios convenient
+  UCHAR  ucTvStandard;                // See definition "ATOM_TV_NTSC ..."
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+  TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+//==============================Data Table Portion====================================
+
+
+/****************************************************************************/
+// Structure used in Data.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+  USHORT        UtilityPipeLine;          // Offset for the utility to get parser info, don't change this position!
+  USHORT        MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from BIOS, need to include the table to build BIOS
+  USHORT        MultimediaConfigInfo;     // Only used by MM Lib, latest version 2.1, not configurable from BIOS, need to include the table to build BIOS
+  USHORT        StandardVESA_Timing;      // Only used by Bios
+  USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
+  USHORT        PaletteData;              // Only used by BIOS
+  USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info
+  USHORT        DIGTransmitterInfo;       // Internal used by VBIOS only version 3.1
+  USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1
+  USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
+  USHORT        GPIO_I2C_Info;            // Shared by various SW components,latest version 1.2 will be used from R600
+  USHORT        VRAM_UsageByFirmware;     // Shared by various SW components,latest version 1.3 will be used from R600
+  USHORT        GPIO_Pin_LUT;             // Shared by various SW components,latest version 1.1
+  USHORT        VESA_ToInternalModeLUT;   // Only used by Bios
+  USHORT        ComponentVideoInfo;       // Shared by various SW components,latest version 2.1 will be used from R600
+  USHORT        PowerPlayInfo;            // Shared by various SW components,latest version 2.1,new design from R600
+  USHORT        GPUVirtualizationInfo;    // Will be obsolete from R600
+  USHORT        SaveRestoreInfo;          // Only used by Bios
+  USHORT        PPLL_SS_Info;             // Shared by various SW components, latest version 1.2, used to be called SS_Info, changed to the new name because of internal ASIC SS info
+  USHORT        OemInfo;                  // Defined and used by external SW, should be obsolete soon
+  USHORT        XTMDS_Info;               // Will be obsolete from R600
+  USHORT        MclkSS_Info;              // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+  USHORT        Object_Header;            // Shared by various SW components,latest version 1.1
+  USHORT        IndirectIOAccess;         // Only used by Bios,this table position can't change at all!!
+  USHORT        MC_InitParameter;         // Only used by command table
+  USHORT        ASIC_VDDC_Info;           // Will be obsolete from R600
+  USHORT        ASIC_InternalSS_Info;     // New table name from R600, used to be called "ASIC_MVDDC_Info"
+  USHORT        TV_VideoMode;             // Only used by command table
+  USHORT        VRAM_Info;                // Only used by command table, latest version 1.3
+  USHORT        MemoryTrainingInfo;       // Used for VBIOS and Diag utility for memory training purpose since R600. The new table rev starts from 2.1
+  USHORT        IntegratedSystemInfo;     // Shared by various SW components
+  USHORT        ASIC_ProfilingInfo;       // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+  USHORT        VoltageObjectInfo;        // Shared by various SW components, latest version 1.1
+  USHORT        PowerSourceInfo;          // Shared by various SW components, latest version 1.1
+  USHORT	      ServiceInfo;
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
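Each USHORT in the master list is the offset of the corresponding data table; a hedged sketch of resolving one of them, assuming the offsets are byte offsets from the start of the VBIOS image (the real driver goes through its atom parser context instead):

static void *example_locate_firmware_info(unsigned char *vbios_image,
                                          ATOM_MASTER_DATA_TABLE *master)
{
  unsigned short offset = master->ListOfDataTables.FirmwareInfo;

  /* a zero offset is taken to mean the table is not present */
  return offset ? (void *)(vbios_image + offset) : (void *)0;
}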
+
+// For backward compatible
+#define LVDS_Info                LCD_Info
+#define DAC_Info                 PaletteData
+#define TMDS_Info                DIGTransmitterInfo
+#define CompassionateData        GPUVirtualizationInfo
+
+/****************************************************************************/
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG                    ulSignature;      // HW info table signature string "$ATI"
+  UCHAR                    ucI2C_Type;       // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+  UCHAR                    ucTV_OutInfo;     // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+  UCHAR                    ucVideoPortInfo;  // Provides the video port capabilities
+  UCHAR                    ucHostPortInfo;   // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+
+/****************************************************************************/
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG                    ulSignature;      // MM info table signature string "$MMT"
+  UCHAR                    ucTunerInfo;      // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+  UCHAR                    ucAudioChipInfo;  // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+  UCHAR                    ucProductID;      // Defines as OEM ID or ATI board ID dependent on product type setting
+  UCHAR                    ucMiscInfo1;      // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+  UCHAR                    ucMiscInfo2;      // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+  UCHAR                    ucMiscInfo3;      // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+  UCHAR                    ucMiscInfo4;      // Video Decoder Host Config (2:0) reserved (7:3)
+  UCHAR                    ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+
+/****************************************************************************/
+// Structures used in FirmwareInfoTable
+/****************************************************************************/
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
+// Others: Reserved
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008      // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010      // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT          0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008      // (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010      // (valid from v2.1 ): =1: engclk ss enable with external ss chip
+
+
+#ifndef _H2INC
+
+//Please don't add to or expand this bitfield structure below; this one will retire soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:1;
+  USHORT SCL2Redefined:1;
+  USHORT PostWithoutModeSet:1;
+  USHORT HyperMemory_Size:4;
+  USHORT HyperMemory_Support:1;
+  USHORT PPMode_Assigned:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT GPUControlsBL:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT DualCRTC_Support:1;
+  USHORT FirmwarePosted:1;
+#else
+  USHORT FirmwarePosted:1;
+  USHORT DualCRTC_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT GPUControlsBL:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT PPMode_Assigned:1;
+  USHORT HyperMemory_Support:1;
+  USHORT HyperMemory_Size:4;
+  USHORT PostWithoutModeSet:1;
+  USHORT SCL2Redefined:1;
+  USHORT Reserved:1;
+#endif
+}ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  ATOM_FIRMWARE_CAPABILITY sbfAccess;
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
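+
+// Illustrative sketch, not part of the ATOM table definitions: the same
+// capability word can be tested either through the ATOM_BIOS_INFO_* masks
+// above or, where the bitfield variant is available, through
+// ATOM_FIRMWARE_CAPABILITY_ACCESS.  The helper name below is hypothetical.
+static inline int atom_example_has_memclk_ss(USHORT usFirmwareCapability)
+{
+  return (usFirmwareCapability & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT) != 0;
+}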
+
+typedef struct _ATOM_FIRMWARE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucPadding[3];               //Don't use them
+  ULONG                           aulReservedForBIOS[3];      //Don't use them
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit, the definitions above can't change!!!
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO;
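+
+// Illustrative sketch, not part of the ATOM table definitions: the clock
+// fields above are stored in 10 kHz units, so converting a table value to
+// kHz is a simple multiply by 10 (e.g. 30000 -> 300000 kHz = 300 MHz).
+// The helper name is hypothetical.
+static inline ULONG atom_example_clock_10khz_to_khz(ULONG ulClockIn10Khz)
+{
+  return ulClockIn10Khz * 10;
+}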
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS[2];      //Don't use them
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS;         //Don't use them
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicate what is the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+// The structure below is to be used from Cypress onward
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved1;
+  ULONG                           ulReserved2;
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit
+  UCHAR                           ucReserved1;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit; if it is 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise the input clock comes from the external spread-spectrum clock
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+  UCHAR                           ucReserved4[3];
+
+}ATOM_FIRMWARE_INFO_V2_1;
+
+// The structure below is to be used from NI onward
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+
+typedef struct _PRODUCT_BRANDING
+{
+    UCHAR     ucEMBEDDED_CAP:2;          // Bit[1:0] Embedded feature level
+    UCHAR     ucReserved:2;              // Bit[3:2] Reserved
+    UCHAR     ucBRANDING_ID:4;           // Bit[7:4] Branding ID
+}PRODUCT_BRANDING;
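+
+// Illustrative sketch, not part of the ATOM table definitions: when only the
+// raw branding byte is available, the fields of PRODUCT_BRANDING can be
+// recovered with shifts/masks matching the layout above.  The helper name is
+// hypothetical.
+static inline UCHAR atom_example_branding_id(UCHAR ucProductBrandingByte)
+{
+  return (ucProductBrandingByte >> 4) & 0x0F;   // Bit[7:4] = ucBRANDING_ID
+}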
+
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulSPLL_OutputFreq;          //In 10Khz unit
+  ULONG                           ulGPUPLL_OutputFreq;        //In 10Khz unit
+  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock  ?
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
+  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  UCHAR                           ucRemoteDisplayConfig;
+  UCHAR                           ucReserved5[3];             //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit; if it is 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise the input clock comes from the external spread-spectrum clock
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+  UCHAR                           ucCoolingSolution_ID;       //0: Air cooling; 1: Liquid cooling ... [COOLING_SOLUTION]
+  PRODUCT_BRANDING                ucProductBranding;          // Bit[7:4]ucBRANDING_ID: Branding ID, Bit[3:2]ucReserved: Reserved, Bit[1:0]ucEMBEDDED_CAP: Embedded feature level.
+  UCHAR                           ucReserved9;
+  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  USHORT                          usBootUpVDDGFXVoltage;      //In unit of mv;
+  ULONG                           ulReserved10[3];            // New added comparing to previous version
+}ATOM_FIRMWARE_INFO_V2_2;
+
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
+
+
+// definition of ucRemoteDisplayConfig
+#define REMOTE_DISPLAY_DISABLE                   0x00
+#define REMOTE_DISPLAY_ENABLE                    0x01
+
+/****************************************************************************/
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN      0x2
+#define IGP_CAP_FLAG_AC_CARD               0x4
+#define IGP_CAP_FLAG_SDVO_CARD             0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE     0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader;
+  ULONG                           ulBootUpEngineClock;          //in 10kHz unit
+  ULONG                           ulBootUpMemoryClock;          //in 10kHz unit
+  ULONG                           ulMaxSystemMemoryClock;       //in 10kHz unit
+  ULONG                           ulMinSystemMemoryClock;       //in 10kHz unit
+  UCHAR                           ucNumberOfCyclesInPeriodHi;
+  UCHAR                           ucLCDTimingSel;             //=0:not valid.!=0 sel this timing descriptor from LCD EDID.
+  USHORT                          usReserved1;
+  USHORT                          usInterNBVoltageLow;        //An intermediate PWM value to set the voltage
+  USHORT                          usInterNBVoltageHigh;       //Another intermediate PWM value to set the voltage
+  ULONG                           ulReserved[2];
+
+  USHORT                          usFSBClock;                     //In MHz unit
+  USHORT                          usCapabilityFlag;              //Bit0=1 indicates the fake HDMI support, Bit1=0/1 for Dynamic clocking dis/enable
+                                                                 //Bit[3:2]==0: No PCIE card, 1: AC card, 2: SDVO card
+                                                                 //Bit[4]==1: P/2 mode, ==0: P/1 mode
+  USHORT                          usPCIENBCfgReg7;                //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+  USHORT                          usK8MemoryClock;            //in MHz unit
+  USHORT                          usK8SyncStartDelay;         //in 0.01 us unit
+  USHORT                          usK8DataReturnTime;         //in 0.01 us unit
+  UCHAR                           ucMaxNBVoltage;
+  UCHAR                           ucMinNBVoltage;
+  UCHAR                           ucMemoryType;                     //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+  UCHAR                           ucNumberOfCyclesInPeriod;      //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
+  UCHAR                           ucStartingPWM_HighTime;     //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+  UCHAR                           ucHTLinkWidth;              //16 bit vs. 8 bit
+  UCHAR                           ucMaxNBVoltageHigh;
+  UCHAR                           ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock:    For Intel IGP,it's the UMA system memory clock
+                        For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+
+usFSBClock:             For Intel IGP,it's FSB Freq
+                        For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock:        For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod:   Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
+
+ucMaxNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of  the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+
+usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
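+
+// Illustrative sketch, not part of the ATOM table definitions, following one
+// reading of the comment above: the PWM period is split across
+// ucNumberOfCyclesInPeriod (low byte) and ucNumberOfCyclesInPeriodHi (high
+// byte), whose bit[7] doubles as the inverter flag.  Helper names are
+// hypothetical.
+static inline USHORT atom_example_pwm_period(UCHAR ucLow, UCHAR ucHi)
+{
+  return (USHORT)(((ucHi & 0x7F) << 8) | ucLow);
+}
+static inline UCHAR atom_example_pwm_inverted(UCHAR ucHi)
+{
+  return (ucHi >> 7) & 0x01;
+}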
+
+
+/*
+The following IGP table is introduced from RS780. SBIOS is supposed to place it in FB before the IGP VBIOS starts VPOST;
+the VBIOS will then copy the whole structure into its image so all GPU SW components can access this data structure to get whatever they need.
+Enough space is reserved so that the table revision should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before.
+*/
+
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG                      ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulReserved1[2];            //must be 0x0 for the reserved
+  ULONG                      ulBootUpUMAClock;          //in 10kHz unit
+  ULONG                      ulBootUpSidePortClock;     //in 10kHz unit
+  ULONG                      ulMinSidePortClock;        //in 10kHz unit
+  ULONG                      ulReserved2[6];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //see explanation below
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulDDISlot1Config;
+  ULONG                      ulDDISlot2Config;
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  UCHAR                      ucDockingPinBit;
+  UCHAR                      ucDockingPinPolarity;
+  ULONG                      ulDockingPinCFGInfo;
+  ULONG                      ulCPUCapInfo;
+  USHORT                     usNumberOfCyclesInPeriod;
+  USHORT                     usMaxNBVoltage;
+  USHORT                     usMinNBVoltage;
+  USHORT                     usBootUpNBVoltage;
+  ULONG                      ulHTLinkFreq;              //in 10Khz
+  USHORT                     usMinHTLinkWidth;
+  USHORT                     usMaxHTLinkWidth;
+  USHORT                     usUMASyncStartDelay;
+  USHORT                     usUMADataReturnTime;
+  USHORT                     usLinkStatusZeroTime;
+  USHORT                     usDACEfuse;            //for storing bandgap value (for RS880 only)
+  ULONG                      ulHighVoltageHTLinkFreq;     // in 10Khz
+  ULONG                      ulLowVoltageHTLinkFreq;      // in 10Khz
+  USHORT                     usMaxUpStreamHTLinkWidth;
+  USHORT                     usMaxDownStreamHTLinkWidth;
+  USHORT                     usMinUpStreamHTLinkWidth;
+  USHORT                     usMinDownStreamHTLinkWidth;
+  USHORT                     usFirmwareVersion;         //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+  USHORT                     usFullT0Time;             // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+  ULONG                      ulReserved3[96];          //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+/*
+ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
+
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+Bit[1]=1: system boots up in AMD Overdrive state or a user customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay state
+      =0: system boots up at driver control state. Power state depends on PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state(Performance) will be supported.
+      =0: Multiple power states supported from PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on current system.
+      =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+      =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
+Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
+      =0: Voltage settings is determined by powerplay table.
+Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue.
+      =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+      =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+      =0: DLL Shut Down feature is not enabled or supported on current system.
+
+ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8] - Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
+                    [7:0]  - Bootup TV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+      [3:0]  - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+      [7:4]  - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+      When a DDI connector is not "paired" (meaning two mutually exclusive connections on chassis and docking, of which only one can be connected at a time)
+      in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
+      if one DDI connector is only populated in docking with PCIE lanes 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+      [15:8] - Lane configuration attribute;
+      [23:16]- Connector type, possible values:
+               CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+               CONNECTOR_OBJECT_ID_DISPLAYPORT
+               CONNECTOR_OBJECT_ID_eDP
+      [31:24]- Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
+For IGP, Hypermemory is the only memory type showed in CCC.
+
+ucUMAChannelNumber:  how many channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ucDockingPinBit:     which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+                    GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+                    PWM mode:  both usMaxNBVoltage & usMinNBVoltage have a valid value and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+                    GPU SW doesn't control mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE doesn't matter
+
+usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
+
+
+ulHTLinkFreq:       Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth:   Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth:   Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime:Memory access latency required for watermark calculation; set this to 0x0 for a K8 CPU, and set a proper value (in units of 0.01 us)
+for Griffin or Greyhound. SBIOS needs to convert to actual time by:
+                     if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+                     if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+                     if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+                     if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq:     HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHTLinkFreq(bootup frequency).
+ulLowVoltageHTLinkFreq:      HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth:  same as above.
+usMinUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth:  same as above.
+*/
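+
+// Illustrative sketch, not part of the ATOM table definitions: since
+// usLinkStatusZeroTime is in 0.01 us units, the T0Ttime encoding described
+// above maps to per-range multipliers of 10 (0.1 us), 50 (0.5 us),
+// 200 (2.0 us) and 2000 (20 us); for the 20 us range only T0Ttime[3:0] of
+// 0x0-0xa is defined.  The helper name is hypothetical.
+static inline USHORT atom_example_t0ttime_to_link_status_zero_time(UCHAR ucT0Ttime)
+{
+  static const USHORT ausScale[4] = { 10, 50, 200, 2000 };   // in 0.01 us units
+  return (USHORT)((ucT0Ttime & 0x0F) * ausScale[(ucT0Ttime >> 4) & 0x03]);
+}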
+
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition
+#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI         5
+
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI    // this define reflects the max defined CPU code
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE                  0x00000004
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY         0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED                        0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED                        0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED              0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED            0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED                        0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED                0x00000200
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK                     0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK                      0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK              0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3                    0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7                    0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11                   0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15                  0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK                       0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED                      0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED                    0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK                  0x00FF0000
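+
+// Illustrative sketch, not part of the ATOM table definitions: ulDDISlotXConfig
+// packs the lane map in bits [7:0], the attribute in bits [15:8] and the
+// connector object ID in bits [23:16], so the masks above can pull the
+// connector type out of a slot config word.  The helper name is hypothetical.
+static inline UCHAR atom_example_ddi_slot_connector_type(ULONG ulDDISlotConfig)
+{
+  return (UCHAR)((ulDDISlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
+}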
+
+// The IntegratedSystemInfoTable revision after V2 is V5, because the real revision of V2 is v1.4. This rev is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG                        ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulDentistVCOFreq;          //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
+  ULONG                      ulLClockFreq;              //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
+  ULONG                        ulBootUpUMAClock;          //in 10kHz unit
+  ULONG                      ulReserved1[8];            //must be 0x0 for the reserved
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulReserved2[4];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //TBD
+  ULONG                      ulCPUCapInfo;              //TBD
+  USHORT                     usMaxNBVoltage;            //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usMinNBVoltage;            //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usBootUpNBVoltage;         //boot up NB voltage
+  UCHAR                      ucHtcTmpLmt;               //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+  UCHAR                      ucTjOffset;                //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+  ULONG                      ulReserved3[4];            //must be 0x0 for the reserved
+  ULONG                      ulDDISlot1Config;          //see above ulDDISlot1Config definition
+  ULONG                      ulDDISlot2Config;
+  ULONG                      ulDDISlot3Config;
+  ULONG                      ulDDISlot4Config;
+  ULONG                      ulReserved4[4];            //must be 0x0 for the reserved
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  USHORT                     usReserved;
+  ULONG                      ulReserved5[4];            //must be 0x0 for the reserved
+  ULONG                      ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+  ULONG                      ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+  ULONG                      ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+  ULONG                      ulReserved6[61];           //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;
+
+
+
+/****************************************************************************/
+// Structure used in GPUVirtualizationInfoTable
+/****************************************************************************/
+typedef struct _ATOM_GPU_VIRTUALIZATION_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG ulMCUcodeRomStartAddr;
+  ULONG ulMCUcodeLength;
+  ULONG ulSMCUcodeRomStartAddr;
+  ULONG ulSMCUcodeLength;
+  ULONG ulRLCVUcodeRomStartAddr;
+  ULONG ulRLCVUcodeLength;
+  ULONG ulTOCUcodeStartAddr;
+  ULONG ulTOCUcodeLength;
+  ULONG ulSMCPatchTableStartAddr;
+  ULONG ulSmcPatchTableLength;
+  ULONG ulSystemFlag;
+}ATOM_GPU_VIRTUALIZATION_INFO_V2_1;
+
+
+#define ATOM_CRT_INT_ENCODER1_INDEX                       0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX                       0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX                        0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX                       0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX                       0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX                       0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX                        0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX                       0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX                        0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX                       0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX                       0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX                        0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX                       0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX                       0x0000000D
+
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID                                     0x00
+#define ASIC_INT_TV_ENCODER_ID                                       0x02
+#define ASIC_INT_DIG1_ENCODER_ID                                     0x03
+#define ASIC_INT_DAC2_ENCODER_ID                                     0x04
+#define ASIC_EXT_TV_ENCODER_ID                                       0x06
+#define ASIC_INT_DVO_ENCODER_ID                                      0x07
+#define ASIC_INT_DIG2_ENCODER_ID                                     0x09
+#define ASIC_EXT_DIG_ENCODER_ID                                      0x05
+#define ASIC_EXT_DIG2_ENCODER_ID                                     0x08
+#define ASIC_INT_DIG3_ENCODER_ID                                     0x0a
+#define ASIC_INT_DIG4_ENCODER_ID                                     0x0b
+#define ASIC_INT_DIG5_ENCODER_ID                                     0x0c
+#define ASIC_INT_DIG6_ENCODER_ID                                     0x0d
+#define ASIC_INT_DIG7_ENCODER_ID                                     0x0e
+
+//define Encoder attribute
+#define ATOM_ANALOG_ENCODER                                                0
+#define ATOM_DIGITAL_ENCODER                                             1
+#define ATOM_DP_ENCODER                                                   2
+
+#define ATOM_ENCODER_ENUM_MASK                            0x70
+#define ATOM_ENCODER_ENUM_ID1                             0x00
+#define ATOM_ENCODER_ENUM_ID2                             0x10
+#define ATOM_ENCODER_ENUM_ID3                             0x20
+#define ATOM_ENCODER_ENUM_ID4                             0x30
+#define ATOM_ENCODER_ENUM_ID5                             0x40
+#define ATOM_ENCODER_ENUM_ID6                             0x50
+
+#define ATOM_DEVICE_CRT1_INDEX                            0x00000000
+#define ATOM_DEVICE_LCD1_INDEX                            0x00000001
+#define ATOM_DEVICE_TV1_INDEX                             0x00000002
+#define ATOM_DEVICE_DFP1_INDEX                            0x00000003
+#define ATOM_DEVICE_CRT2_INDEX                            0x00000004
+#define ATOM_DEVICE_LCD2_INDEX                            0x00000005
+#define ATOM_DEVICE_DFP6_INDEX                            0x00000006
+#define ATOM_DEVICE_DFP2_INDEX                            0x00000007
+#define ATOM_DEVICE_CV_INDEX                              0x00000008
+#define ATOM_DEVICE_DFP3_INDEX                            0x00000009
+#define ATOM_DEVICE_DFP4_INDEX                            0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX                            0x0000000B
+
+#define ATOM_DEVICE_RESERVEDC_INDEX                       0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX                       0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX                       0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX                       0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO                    (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2                  ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3                  (ATOM_DEVICE_DFP5_INDEX + 1 )
+
+#define ATOM_MAX_SUPPORTED_DEVICE                         (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT                          (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT                          (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT                           (0x1L << ATOM_DEVICE_TV1_INDEX  )
+#define ATOM_DEVICE_DFP1_SUPPORT                          (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT                          (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT                          (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT                          (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT                          (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT                            (0x1L << ATOM_DEVICE_CV_INDEX   )
+#define ATOM_DEVICE_DFP3_SUPPORT                          (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT                          (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT                          (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+
+#define ATOM_DEVICE_CRT_SUPPORT                           (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT                           (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT |  ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT                            ATOM_DEVICE_TV1_SUPPORT
+#define ATOM_DEVICE_LCD_SUPPORT                           (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK                   0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT                  0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA                         0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I                       0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D                       0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A                       0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO                      0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE                   0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS                        0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK                   0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART                       0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A                 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B                 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1                      0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT                 0x0000000F
+
+
+#define ATOM_DEVICE_DAC_INFO_MASK                         0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA                         0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB                         0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC                        0x00000003
+
+#define ATOM_DEVICE_I2C_ID_NOI2C                          0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK                      0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT                     0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK                           0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT                          0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE              0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE                  0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE                0x00000003    //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL                 0x00000004    //For IGP RS690
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK                 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT                0x00000007
+#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C            0x00000000
+#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C            0x00000001
+
+//  usDeviceSupport:
+//  Bits0   = 0 - no CRT1 support= 1- CRT1 is supported
+//  Bit 1   = 0 - no LCD1 support= 1- LCD1 is supported
+//  Bit 2   = 0 - no TV1  support= 1- TV1  is supported
+//  Bit 3   = 0 - no DFP1 support= 1- DFP1 is supported
+//  Bit 4   = 0 - no CRT2 support= 1- CRT2 is supported
+//  Bit 5   = 0 - no LCD2 support= 1- LCD2 is supported
+//  Bit 6   = 0 - no DFP6 support= 1- DFP6 is supported
+//  Bit 7   = 0 - no DFP2 support= 1- DFP2 is supported
+//  Bit 8   = 0 - no CV   support= 1- CV   is supported
+//  Bit 9   = 0 - no DFP3 support= 1- DFP3 is supported
+//  Bit 10= 0 - no DFP4 support= 1- DFP4 is supported
+//  Bit 11= 0 - no DFP5 support= 1- DFP5 is supported
+//
+//
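+
+// Illustrative sketch, not part of the ATOM table definitions: usDeviceSupport
+// (like the boot-up display vectors) is a plain bit vector, so an individual
+// device can be tested with the ATOM_DEVICE_*_SUPPORT masks above, e.g.
+// ATOM_DEVICE_LCD1_SUPPORT.  The helper name is hypothetical.
+static inline int atom_example_device_supported(USHORT usDeviceSupport, ULONG ulDeviceMask)
+{
+  return (usDeviceSupport & ulDeviceMask) != 0;
+}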
+
+/****************************************************************************/
+// Structure used in MclkSS_InfoTable
+/****************************************************************************/
+//    ucI2C_ConfigID
+//    [7:0] - I2C LINE Associate ID
+//            = 0   - no I2C
+//    [7]   - HW_Cap       = 1,   [6:0]=HW assisted I2C ID (HW line selection)
+//                         = 0,   [6:0]=SW assisted I2C ID
+//    [6:4] - HW_ENGINE_ID = 1,   HW engine for NON multimedia use
+//                         = 2,   HW engine for Multimedia use
+//                         = 3-7, Reserved for future I2C engines
+//    [3:0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfHW_Capable:1;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfI2C_LineMux:4;
+#else
+  UCHAR   bfI2C_LineMux:4;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+  ATOM_I2C_ID_CONFIG sbfAccess;
+  UCHAR              ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
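+
+// Illustrative sketch, not part of the ATOM table definitions: a raw
+// ucI2C_ConfigID byte splits into the HW-capable flag, HW engine ID and line
+// mux exactly as in ATOM_I2C_ID_CONFIG above, so it can also be decoded with
+// plain shifts/masks.  The helper name is hypothetical.
+static inline UCHAR atom_example_i2c_line_mux(UCHAR ucI2cConfigId)
+{
+  return ucI2cConfigId & 0x0F;   // [3:0]: mux number (HW assisted) or GPIO ID (SW assisted)
+}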
+
+
+/****************************************************************************/
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+  USHORT                    usClkMaskRegisterIndex;
+  USHORT                    usClkEnRegisterIndex;
+  USHORT                    usClkY_RegisterIndex;
+  USHORT                    usClkA_RegisterIndex;
+  USHORT                    usDataMaskRegisterIndex;
+  USHORT                    usDataEnRegisterIndex;
+  USHORT                    usDataY_RegisterIndex;
+  USHORT                    usDataA_RegisterIndex;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+  UCHAR                     ucClkMaskShift;
+  UCHAR                     ucClkEnShift;
+  UCHAR                     ucClkY_Shift;
+  UCHAR                     ucClkA_Shift;
+  UCHAR                     ucDataMaskShift;
+  UCHAR                     ucDataEnShift;
+  UCHAR                     ucDataY_Shift;
+  UCHAR                     ucDataA_Shift;
+  UCHAR                     ucReserved1;
+  UCHAR                     ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ATOM_GPIO_I2C_ASSIGMENT   asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+// Common Structure used in other structures
+/****************************************************************************/
+
+#ifndef _H2INC
+
+//Please don't add to or expand this bitfield structure below; this one will retire soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:6;
+  USHORT RGB888:1;
+  USHORT DoubleClock:1;
+  USHORT Interlace:1;
+  USHORT CompositeSync:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT VerticalCutOff:1;
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HorizontalCutOff:1;
+#else
+  USHORT HorizontalCutOff:1;
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VerticalCutOff:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT CompositeSync:1;
+  USHORT Interlace:1;
+  USHORT DoubleClock:1;
+  USHORT RGB888:1;
+  USHORT Reserved:6;
+#endif
+}ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+  ATOM_MODE_MISC_INFO sbfAccess;
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
+#else
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
+#endif
+
+// usModeMiscInfo-
+#define ATOM_H_CUTOFF           0x01
+#define ATOM_HSYNC_POLARITY     0x02             //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY     0x04             //0=Active High, 1=Active Low
+#define ATOM_V_CUTOFF           0x08
+#define ATOM_H_REPLICATIONBY2   0x10
+#define ATOM_V_REPLICATIONBY2   0x20
+#define ATOM_COMPOSITESYNC      0x40
+#define ATOM_INTERLACE          0x80
+#define ATOM_DOUBLE_CLOCK_MODE  0x100
+#define ATOM_RGB888_MODE        0x200
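+
+// Illustrative sketch, not part of the ATOM table definitions: a set polarity
+// bit in usModeMiscInfo means active-low sync, so negative HSync polarity can
+// be detected directly from the flag above.  The helper name is hypothetical.
+static inline int atom_example_hsync_active_low(USHORT usModeMiscInfo)
+{
+  return (usModeMiscInfo & ATOM_HSYNC_POLARITY) != 0;
+}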
+
+//usRefreshRate-
+#define ATOM_REFRESH_43         43
+#define ATOM_REFRESH_47         47
+#define ATOM_REFRESH_56         56
+#define ATOM_REFRESH_60         60
+#define ATOM_REFRESH_65         65
+#define ATOM_REFRESH_70         70
+#define ATOM_REFRESH_72         72
+#define ATOM_REFRESH_75         75
+#define ATOM_REFRESH_85         85
+
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// To translate from EDID to ATOM_MODE_TIMING, use the following formulas.
+//
+//   VESA_HTOTAL        =   VESA_ACTIVE + 2*VESA_BORDER + VESA_BLANK
+//                      =   EDID_HA + EDID_HBL
+//   VESA_HDISP         =   VESA_ACTIVE   =   EDID_HA
+//   VESA_HSYNC_START   =   VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+//                      =   EDID_HA + EDID_HSO
+//   VESA_HSYNC_WIDTH   =   VESA_HSYNC_TIME   =   EDID_HSPW
+//   VESA_BORDER        =   EDID_BORDER
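+
+// A minimal sketch of how the EDID -> VESA formulas above could be applied when
+// filling the horizontal fields of an ATOM_MODE_TIMING-style structure.  The
+// parameter names (edid_ha, edid_hbl, edid_hso, edid_hspw) are assumed EDID
+// detailed-timing fields, not names defined by this header.
+static inline void atom_example_h_timing_from_edid(USHORT edid_ha, USHORT edid_hbl,
+                                                   USHORT edid_hso, USHORT edid_hspw,
+                                                   USHORT *h_total, USHORT *h_disp,
+                                                   USHORT *h_sync_start, USHORT *h_sync_width)
+{
+  *h_total      = edid_ha + edid_hbl;   // VESA_HTOTAL      = EDID_HA + EDID_HBL
+  *h_disp       = edid_ha;              // VESA_HDISP       = VESA_ACTIVE = EDID_HA
+  *h_sync_start = edid_ha + edid_hso;   // VESA_HSYNC_START = EDID_HA + EDID_HSO
+  *h_sync_width = edid_hspw;            // VESA_HSYNC_WIDTH = EDID_HSPW
+}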
+
+
+/****************************************************************************/
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+  USHORT  usH_Size;
+  USHORT  usH_Blanking_Time;
+  USHORT  usV_Size;
+  USHORT  usV_Blanking_Time;
+  USHORT  usH_SyncOffset;
+  USHORT  usH_SyncWidth;
+  USHORT  usV_SyncOffset;
+  USHORT  usV_SyncWidth;
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  UCHAR   ucH_Border;         // From DFP EDID
+  UCHAR   ucV_Border;
+  UCHAR   ucCRTC;             // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR   ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+  USHORT                      usH_Total;        // horizontal total
+  USHORT                      usH_Disp;         // horizontal display
+  USHORT                      usH_SyncStart;    // horizontal Sync start
+  USHORT                      usH_SyncWidth;    // horizontal Sync width
+  USHORT                      usV_Total;        // vertical total
+  USHORT                      usV_Disp;         // vertical display
+  USHORT                      usV_SyncStart;    // vertical Sync start
+  USHORT                      usV_SyncWidth;    // vertical Sync width
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  UCHAR                       ucCRTC;           // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR                       ucOverscanRight;  // right
+  UCHAR                       ucOverscanLeft;   // left
+  UCHAR                       ucOverscanBottom; // bottom
+  UCHAR                       ucOverscanTop;    // top
+  UCHAR                       ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+
+/****************************************************************************/
+// Structure used in StandardVESA_TimingTable
+//                   AnalogTV_InfoTable
+//                   ComponentVideoInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING
+{
+  USHORT  usCRTC_H_Total;
+  USHORT  usCRTC_H_Disp;
+  USHORT  usCRTC_H_SyncStart;
+  USHORT  usCRTC_H_SyncWidth;
+  USHORT  usCRTC_V_Total;
+  USHORT  usCRTC_V_Disp;
+  USHORT  usCRTC_V_SyncStart;
+  USHORT  usCRTC_V_SyncWidth;
+  USHORT  usPixelClock;                                //in 10Khz unit
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  USHORT  usCRTC_OverscanRight;
+  USHORT  usCRTC_OverscanLeft;
+  USHORT  usCRTC_OverscanBottom;
+  USHORT  usCRTC_OverscanTop;
+  USHORT  usReserve;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+  USHORT  usPixClk;
+  USHORT  usHActive;
+  USHORT  usHBlanking_Time;
+  USHORT  usVActive;
+  USHORT  usVBlanking_Time;
+  USHORT  usHSyncOffset;
+  USHORT  usHSyncWidth;
+  USHORT  usVSyncOffset;
+  USHORT  usVSyncWidth;
+  USHORT  usImageHSize;
+  USHORT  usImageVSize;
+  UCHAR   ucHBorder;
+  UCHAR   ucVBorder;
+  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+// Structure used in LVDS_InfoTable
+//  * Need a document to describe this table
+/****************************************************************************/
+#define SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+#define SUPPORTED_LCD_REFRESHRATE_48Hz          0x0040
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usModePatchTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;
+  UCHAR               ucPanelInfoSize;                //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  UCHAR               ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define   LCDPANEL_CAP_READ_EDID                  0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define   LCDPANEL_CAP_DRR_SUPPORTED              0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define   LCDPANEL_CAP_eDP                        0x4
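+
+// A minimal sketch of how a driver might test the special-handling caps above
+// against the ucLCDPanel_SpecialHandlingCap byte of ATOM_LVDS_INFO_V12; the
+// helper names are assumptions, not part of this header.
+static inline int atom_example_panel_is_edp(UCHAR special_handling_cap)
+{
+  return (special_handling_cap & LCDPANEL_CAP_eDP) != 0;           // LCD1 is eDP rather than LVDS
+}
+
+static inline int atom_example_panel_supports_drr(UCHAR special_handling_cap)
+{
+  return (special_handling_cap & LCDPANEL_CAP_DRR_SUPPORTED) != 0; // dynamic refresh rate capable
+}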
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK    0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
+#define PANEL_RANDOM_DITHER   0x80
+#define PANEL_RANDOM_DITHER_MASK   0x80
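+
+// A minimal sketch of decoding the EDID V1.4 style color depth field (bits [6:4],
+// see the table above) from a panel misc byte such as ATOM_LCD_INFO_V13.ucLCD_Misc.
+// Returns bits per primary color, or 0 for the undefined/reserved encodings.
+// The helper name is an assumption, not part of this header.
+static inline UCHAR atom_example_panel_bpc(UCHAR misc)
+{
+  UCHAR code = (UCHAR)((misc & PANEL_COLOR_BIT_DEPTH_MASK) >> 4);  // bits [6:4]
+
+  if (code == 0 || code == 7)
+    return 0;                      // 000b = undefined, 111b = reserved
+  return (UCHAR)(4 + 2 * code);    // 001b->6, 010b->8, ... 110b->16 bits per color
+}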
+
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this
+
+
+typedef struct _ATOM_LCD_REFRESH_RATE_SUPPORT
+{
+    UCHAR ucSupportedRefreshRate;
+    UCHAR ucMinRefreshRateForDRR;
+}ATOM_LCD_REFRESH_RATE_SUPPORT;
+
+/****************************************************************************/
+// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families:  NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+typedef struct _ATOM_LCD_INFO_V13
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  union
+  {
+    USHORT            usSupportedRefreshRate;
+    ATOM_LCD_REFRESH_RATE_SUPPORT sRefreshRateSupport;
+  };
+  ULONG               ulReserved0;
+  UCHAR               ucLCD_Misc;                // Reorganized in V13
+                                                 // Bit0: {=0:single, =1:dual},
+                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
+                                                 // Bit3:2: {Grey level}
+                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
+                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13
+                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+                                                 // Bit7-3: Reserved
+  UCHAR               ucPanelInfoSize;                //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
+
+  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
+
+  UCHAR               ucOffDelay_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
+  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
+  UCHAR               ucReserved1;
+
+  UCHAR               ucDPCD_eDP_CONFIGURATION_CAP;     // dpcd 0dh
+  UCHAR               ucDPCD_MAX_LINK_RATE;             // dpcd 01h
+  UCHAR               ucDPCD_MAX_LANE_COUNT;            // dpcd 02h
+  UCHAR               ucDPCD_MAX_DOWNSPREAD;            // dpcd 03h
+
+  USHORT              usMaxPclkFreqInSingleLink;        // Max PixelClock frequency in single link mode.
+  UCHAR               uceDPToLVDSRxId;
+  UCHAR               ucLcdReservd;
+  ULONG               ulReserved[2];
+}ATOM_LCD_INFO_V13;
+
+#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define   LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID no change comparing to previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define   LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define   LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP no change comparing to previous version
+
+//uceDPToLVDSRxId
+#define eDP_TO_LVDS_RX_DISABLE                  0x00       // no eDP->LVDS translator chip
+#define eDP_TO_LVDS_COMMON_ID                   0x01       // common eDP->LVDS translator chip without AMD SW init
+#define eDP_TO_LVDS_RT_ID                       0x02       // RT translator which requires AMD SW init
+
+typedef struct  _ATOM_PATCH_RECORD_MODE
+{
+  UCHAR     ucRecordType;
+  USHORT    usHDisp;
+  USHORT    usVDisp;
+}ATOM_PATCH_RECORD_MODE;
+
+typedef struct  _ATOM_LCD_RTS_RECORD
+{
+  UCHAR     ucRecordType;
+  UCHAR     ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
+
+//!! If the record below exists, it should always be the first record for easy use in command table!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct  _ATOM_LCD_MODE_CONTROL_CAP
+{
+  UCHAR     ucRecordType;
+  USHORT    usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF                   1
+#define LCD_MODE_CAP_CRTC_OFF                 2
+#define LCD_MODE_CAP_PANEL_OFF                4
+
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+  UCHAR ucRecordType;
+  UCHAR ucFakeEDIDLength;       // = 128 means EDID length is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128
+  UCHAR ucFakeEDIDString[1];    // This actually has ucFakeEdidLength elements.
+} ATOM_FAKE_EDID_PATCH_RECORD;
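+
+// A minimal sketch of how the encoded ucFakeEDIDLength above translates to the
+// actual EDID size in bytes, following the field comment; the helper name is an
+// assumption, not part of this header.
+static inline USHORT atom_example_fake_edid_bytes(UCHAR ucFakeEDIDLength)
+{
+  // 128 means exactly 128 bytes; any other value counts 128-byte blocks.
+  return (ucFakeEDIDLength == 128) ? 128 : (USHORT)(ucFakeEDIDLength * 128);
+}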
+
+typedef struct  _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+   UCHAR    ucRecordType;
+   USHORT      usHSize;
+   USHORT      usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE       1
+#define LCD_RTS_RECORD_TYPE                   2
+#define LCD_CAP_RECORD_TYPE                   3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE       4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE      5
+#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE     6
+#define ATOM_RECORD_END_TYPE                  0xFF
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+  USHORT              usSpreadSpectrumPercentage;
+  UCHAR               ucSpreadSpectrumType;       //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Bit2=1: PCIE REFCLK SS, =0 internal PPLL SS. Others: TBD
+  UCHAR               ucSS_Step;
+  UCHAR               ucSS_Delay;
+  UCHAR               ucSS_Id;
+  UCHAR               ucRecommendedRef_Div;
+  UCHAR               ucSS_Range;               //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY                      16
+#define ATOM_DP_SS_ID1                                     0x0f1         // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
+#define ATOM_DP_SS_ID2                                     0x0f2         // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID              0x0f3      // SS ID for LV link translator chip at 2.7Ghz
+#define ATOM_LVLINK_1620MHz_SS_ID              0x0f4      // SS ID for LV link translator chip at 1.62Ghz
+
+
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+#define ATOM_INTERNAL_SS_MASK                  0x00000000
+#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT                2
+#define EXEC_SS_DELAY_SHIFT                    4
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT         4
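+
+// A minimal sketch of classifying ATOM_SPREAD_SPECTRUM_ASSIGNMENT.ucSpreadSpectrumType
+// with the mode/source masks above (bit0 = centre vs. down spread, bit1 = external
+// vs. internal source); the helper names are assumptions, not part of this header.
+static inline int atom_example_ss_is_centre_spread(UCHAR ss_type)
+{
+  return (ss_type & ATOM_SS_CENTRE_SPREAD_MODE_MASK) == ATOM_SS_CENTRE_SPREAD_MODE;
+}
+
+static inline int atom_example_ss_is_external(UCHAR ss_type)
+{
+  return (ss_type & ATOM_EXTERNAL_SS_MASK) != 0;
+}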
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ATOM_SPREAD_SPECTRUM_ASSIGNMENT   asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+
+/****************************************************************************/
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC                1
+//ATOM_TV_NTSCJ               2
+//ATOM_TV_PAL                 3
+//ATOM_TV_PALM                4
+//ATOM_TV_PALCN               5
+//ATOM_TV_PALN                6
+//ATOM_TV_PAL60               7
+//ATOM_TV_SECAM               8
+
+//ucTVSuppportedStd definition:
+#define NTSC_SUPPORT          0x1
+#define NTSCJ_SUPPORT         0x2
+
+#define PAL_SUPPORT           0x4
+#define PALM_SUPPORT          0x8
+#define PALCN_SUPPORT         0x10
+#define PALN_SUPPORT          0x20
+#define PAL60_SUPPORT         0x40
+#define SECAM_SUPPORT         0x80
+
+#define MAX_SUPPORTED_TV_TIMING    2
+
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    ucTV_SuppportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard;
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
+
+typedef struct _ATOM_DPCD_INFO
+{
+  UCHAR   ucRevisionNumber;        //10h : Revision 1.0; 11h : Revision 1.1
+  UCHAR   ucMaxLinkRate;           //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+  UCHAR   ucMaxLane;               //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
+  UCHAR   ucMaxDownSpread;         //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK    0x1F
+
+/**************************************************************************/
+// VRAM usage and related definitions
+
+// One chunk of VRAM used by the BIOS is for HWICON surfaces, EDID data,
+// current mode timing and detailed timing and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: The physical address of this memory portion = mmFB_START(4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR
+// To Bios:   ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR -> MM_INDEX
+
+// Moved VESA_MEMORY_IN_64K_BLOCK definition to "AtomConfig.h" so that it can be redefined in design (SKU).
+//#ifndef VESA_MEMORY_IN_64K_BLOCK
+//#define VESA_MEMORY_IN_64K_BLOCK        0x100       //256*64K=16Mb (Max. VESA memory is 16Mb!)
+//#endif
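+
+// A minimal sketch of how a driver would turn one of the ATOM_x_ADDR offsets
+// defined below into a physical address, following the "To driver:" formula
+// above.  fb_start and vram_usage_start stand in for mmFB_START and
+// ATOMBIOS_VRAM_USAGE_START_ADDR, which are not defined in this header.
+static inline ULONG atom_example_vram_phys_addr(ULONG fb_start, ULONG vram_usage_start,
+                                                ULONG atom_x_addr)
+{
+  // e.g. atom_x_addr = ATOM_CRT1_EDID_ADDR; every ATOM_x_ADDR value is an offset
+  // from the start of the reserved BIOS block.
+  return fb_start + vram_usage_start + atom_x_addr;
+}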
+
+#define ATOM_EDID_RAW_DATASIZE          256         //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE        4096        //In Bytes
+#define ATOM_HWICON_INFOTABLE_SIZE      32
+#define MAX_DTD_MODE_IN_VRAM            6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT)
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE  32*8                         //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
+#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )
+
+#define ATOM_HWICON1_SURFACE_ADDR       0
+#define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR      (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR             (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR     (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR       (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD1_EDID_ADDR             (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR     (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR      (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR      (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR             (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR     (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR       (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR             (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR     (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR       (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR             (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR     (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR      (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP6_EDID_ADDR             (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR     (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR     (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR             (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR     (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR     (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR               (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR       (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR       (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR             (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR     (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR     (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR             (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR     (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR     (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR             (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)
+#define ATOM_STACK_STORAGE_END          ATOM_STACK_STORAGE_START + 512
+
+//The size below is in Kb!
+#define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+
+#define ATOM_VRAM_RESERVE_V2_SIZE      32
+
+#define   ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
+#define   ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
+#define   ATOM_VRAM_BLOCK_NEEDS_RESERVATION      0x0
+
+/***********************************************************************************/
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+//        at run time.
+// Note2: From RV770, the memory is more than 32bit addressable, so we will change
+//        ucTableFormatRevision=1, ucTableContentRevision=4; the structure remains
+//        exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
+//        (an offset from the start of the memory address) is KB aligned instead of byte aligned.
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
+constant across VGA and non-VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
+
+If (ulStartAddrUsedByFirmware!=0)
+   FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+   (the reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose)
+else   //Non VGA case
+ if (FB_Size<=2Gb)
+    FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
+ else
+    FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB;
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
+
+/***********************************************************************************/
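+
+// A minimal sketch of the CAIL logic from the comment above written out as C.
+// The parameter names and the assumption that every value is expressed in KB
+// are the editor's; the header itself only gives the pseudocode.
+static inline ULONG atom_example_fb_access_area_offset(ULONG ulStartAddrUsedByFirmwareInKb,
+                                                       ULONG ulFBSizeInKb,
+                                                       ULONG ulAperSizeInKb,
+                                                       USHORT usFBUsedbyDrvInKB)
+{
+  if (ulStartAddrUsedByFirmwareInKb != 0)
+    // VGA case: VBIOS already claimed the reserved area, including the FB access area.
+    return ulStartAddrUsedByFirmwareInKb - usFBUsedbyDrvInKB;
+  else if (ulFBSizeInKb <= 2ul * 1024 * 1024)              // FB_Size <= 2Gb
+    return ulFBSizeInKb - usFBUsedbyDrvInKB;
+  else
+    return ulAperSizeInKb - usFBUsedbyDrvInKB;
+}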
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO         1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO   asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
+
+// changed version to 1.5 to allow the driver to allocate the vram area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5   asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+  USHORT                   usGpioPin_AIndex;
+  UCHAR                    ucGpioPinBitShift;
+  UCHAR                    ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
+
+//ucGPIO_ID pre-define id for multiple usage
+// GPIO use to control PCIE_VDDC in certain SLT board
+#define PCIE_VDDC_CONTROL_GPIO_PINID        56
+
+//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC switching feature is enabled
+#define PP_AC_DC_SWITCH_GPIO_PINID          60
+//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enabled
+#define VDDC_VRHOT_GPIO_PINID               61
+//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
+#define VDDC_PCC_GPIO_PINID                 62
+// Only used on certain SLT/PA board to allow utility to cut Efuse.
+#define EFUSE_CUT_ENABLE_GPIO_PINID         63
+// ucGPIO_ID=DRAM_SELF_REFRESH_GPIO_PINID is used for memory self refresh (ucGPIO=0, DRAM self-refresh; ucGPIO=
+#define DRAM_SELF_REFRESH_GPIO_PINID        64
+// Thermal interrupt output->system thermal chip GPIO pin
+#define THERMAL_INT_OUTPUT_GPIO_PINID       65
+
+
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+  ATOM_COMMON_TABLE_HEADER  sHeader;
+  ATOM_GPIO_PIN_ASSIGNMENT   asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/
+// Structure used in ComponentVideoInfoTable
+/****************************************************************************/
+#define GPIO_PIN_ACTIVE_HIGH          0x1
+#define MAX_SUPPORTED_CV_STANDARDS    5
+
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK  0x1F    // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK  0x60    // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK    0x80    // [7]
+
+typedef struct _ATOM_GPIO_INFO
+{
+  USHORT  usAOffset;
+  UCHAR   ucSettings;
+  UCHAR   ucReserved;
+}ATOM_GPIO_INFO;
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION           0x2
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN                   0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK              0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 out put 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A       0x01     //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B       0x02     //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT   0x0
+
+//Line 3 out put 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04     //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08     //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+//Line 3 out put 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A        0x10     //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B        0x20     //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT    0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK              0x3F     // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST             0x80     //bit 7
+
+//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A   3   //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B   4   //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT             usMask_PinRegisterIndex;
+  USHORT             usEN_PinRegisterIndex;
+  USHORT             usY_PinRegisterIndex;
+  USHORT             usA_PinRegisterIndex;
+  UCHAR              ucBitShift;
+  UCHAR              ucPinActiveState;  //ucPinActiveState: Bit0=1 active high, =0 active low
+  ATOM_DTD_FORMAT    sReserved;         // must be zeroed out
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucReserved[3];
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucReserved;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST  ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/
+// Structure used in object_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+  USHORT                    usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+
+typedef struct  _ATOM_DISPLAY_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID
+  USHORT    usGPUObjectId;                                 //GPU ID
+  USHORT    usGraphicObjIds[1];                            //from the 1st Encoder Obj sourced from the GPU to the last Graphic Obj terminating at the connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID
+  USHORT    usGPUObjectId;                                 //GPU ID
+  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+  UCHAR                           ucNumOfDispPath;
+  UCHAR                           ucVersion;
+  UCHAR                           ucPadding[2];
+  ATOM_DISPLAY_OBJECT_PATH        asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+typedef struct _ATOM_OBJECT                                //each object has this structure
+{
+  USHORT              usObjectID;
+  USHORT              usSrcDstTableOffset;
+  USHORT              usRecordOffset;                     //this pointing to a bunch of records defined below
+  USHORT              usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE                         //Each of the four object table offsets above points to an array of objects that all have this structure
+{
+  UCHAR               ucNumberOfObjects;
+  UCHAR               ucPadding[3];
+  ATOM_OBJECT         asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT         //usSrcDstTableOffset pointing to this structure
+{
+  UCHAR               ucNumberOfSrc;
+  USHORT              usSrcObjectID[1];
+  UCHAR               ucNumberOfDst;
+  USHORT              usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0                   0
+#define EXT_HPDPIN_LUTINDEX_1                   1
+#define EXT_HPDPIN_LUTINDEX_2                   2
+#define EXT_HPDPIN_LUTINDEX_3                   3
+#define EXT_HPDPIN_LUTINDEX_4                   4
+#define EXT_HPDPIN_LUTINDEX_5                   5
+#define EXT_HPDPIN_LUTINDEX_6                   6
+#define EXT_HPDPIN_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES   (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0                   0
+#define EXT_AUXDDC_LUTINDEX_1                   1
+#define EXT_AUXDDC_LUTINDEX_2                   2
+#define EXT_AUXDDC_LUTINDEX_3                   3
+#define EXT_AUXDDC_LUTINDEX_4                   4
+#define EXT_AUXDDC_LUTINDEX_5                   5
+#define EXT_AUXDDC_LUTINDEX_6                   6
+#define EXT_AUXDDC_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
+
+//ucChannelMapping are defined as following
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
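+
+// A minimal sketch of extracting a per-lane source selection straight from a raw
+// ucChannelMapping byte, following the DP bit layout described above
+// (0 = GPU pin TX0, ..., 3 = GPU pin TX3); the helper name is an assumption,
+// not part of this header.
+static inline UCHAR atom_example_dp_lane_source(UCHAR ucChannelMapping, UCHAR lane /* 0..3 */)
+{
+  return (UCHAR)((ucChannelMapping >> (lane * 2)) & 0x3);   // Bit[2*lane+1 : 2*lane]
+}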
+
+//for DVI/HDMI, in dual link case, both links have to have same mapping.
+//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
+typedef struct _EXT_DISPLAY_PATH
+{
+  USHORT  usDeviceTag;                    //A bit vector to show what devices are supported
+  USHORT  usDeviceACPIEnum;               //16bit device ACPI id.
+  USHORT  usDeviceConnector;              //A physical connector for displays to plug in, using object connector definitions
+  UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
+  UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
+  USHORT  usExtEncoderObjId;              //external encoder object id
+  union{
+    UCHAR   ucChannelMapping;                  // if ucChannelMapping=0, using default one to one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR   ucChPNInvert;                   // bit vector for up to 8 lanes, =0: P and N is not invert, =1 P and N is inverted
+  USHORT  usCaps;
+  USHORT  usReserved;
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID          16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH    7
+
+//usCaps
+#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE               0x01
+#define  EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN             0x02
+#define  EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204          0x04
+#define  EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT     0x08
+
+typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16 byte long string
+  EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+  UCHAR                    ucChecksum;                            // a simple Checksum of the sum of whole structure equal to 0x0.
+  UCHAR                    uc3DStereoPinId;                       // use for eDP panel
+  UCHAR                    ucRemoteDisplayConfig;
+  UCHAR                    uceDPToLVDSRxId;
+  UCHAR                    ucFixDPVoltageSwing;                   // usCaps[1]=1, this indicate DP_LANE_SET value
+  UCHAR                    Reserved[3];                           // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+//Related definitions; all records are different but they share a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+  UCHAR               ucRecordType;                      //An enum to indicate the record type
+  UCHAR               ucRecordSize;                      //The size of the whole record in byte
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE                           1
+#define ATOM_HPD_INT_RECORD_TYPE                       2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE       5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
+#define ATOM_JTAG_RECORD_TYPE                          8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE                10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE                  11
+#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE        12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE   13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE        14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE          16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicates the object is linked to another obj described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
+#define ATOM_BRACKET_LAYOUT_RECORD_TYPE                21
+
+
+//Must be updated when a new record type is added, equal to that record definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
+
+typedef struct  _ATOM_I2C_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          sucI2cId;
+  UCHAR                       ucI2CAddr;              //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct  _ATOM_HPD_INT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDIntGPIOID;         //Corresponding block in GPIO_PIN_INFO table gives the pin info
+  UCHAR                       ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct  _ATOM_OUTPUT_PROTECTION_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucProtectionFlag;
+  UCHAR                       ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG
+{
+  ULONG                       ulACPIDeviceEnum;       //Reserved for now
+  USHORT                      usDeviceID;             //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+  USHORT                      usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucNumberOfDevice;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_DEVICE_TAG   asDeviceTag[1];         //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct  _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                              ucConfigGPIOID;
+  UCHAR                              ucConfigGPIOState;       //Set to 1 when it's active high to enable external flow in
+  UCHAR                       ucFlowinGPIPID;
+  UCHAR                       ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct  _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucCTL1GPIO_ID;
+  UCHAR                       ucCTL1GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL2GPIO_ID;
+  UCHAR                       ucCTL2GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL3GPIO_ID;
+  UCHAR                       ucCTL3GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTLFPGA_IN_ID;
+  UCHAR                       ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucGPIOID;               //Corresponding block in GPIO_PIN_INFO table gives the pin info
+  UCHAR                       ucTVActiveState;        //Indicating when the pin==0 or 1 when TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct  _ATOM_JTAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucTMSGPIO_ID;
+  UCHAR                       ucTMSGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTCKGPIO_ID;
+  UCHAR                       ucTCKGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDOGPIO_ID;
+  UCHAR                       ucTDOGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDIGPIO_ID;
+  UCHAR                       ucTDIGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+  UCHAR                       ucGPIOID;               // GPIO_ID, find the corresponding ID in GPIO_LUT table
+  UCHAR                       ucGPIO_PinState;        // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucFlags;                // Future expandability
+  UCHAR                       ucNumberOfPins;         // Number of GPIO pins used to control the object
+  ATOM_GPIO_PIN_CONTROL_PAIR  asGpio[1];              // the real gpio pin pair determined by number of pins ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state
+#define GPIO_PIN_TYPE_INPUT             0x00
+#define GPIO_PIN_TYPE_OUTPUT            0x10
+#define GPIO_PIN_TYPE_HW_CONTROL        0x20
+
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
+#define GPIO_PIN_OUTPUT_STATE_MASK      0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT     0
+#define GPIO_PIN_STATE_ACTIVE_LOW       0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
+
+// Indexes to GPIO array in GLSync record
+// GLSync record is for Frame Lock/Gen Lock feature.
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK    0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC     1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC     2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ  3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT  4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET   6
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL  8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX       9
+
+typedef struct  _ATOM_ENCODER_DVO_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ULONG                       ulStrengthControl;      // DVOA strength control for CF
+  UCHAR                       ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
+
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN               0x02         // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
+#define ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN          0x04         // HDMI2.0 6Gbps enable or not.
+
+typedef struct  _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:14;        // Bits 2-15 may be defined for other capabilities in future
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability.
+#else
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability.
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usReserved:14;        // Bits 2-15 may be defined for other capabilities in future
+#endif
+    };
+  };
+}ATOM_ENCODER_CAP_RECORD;
+
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
+
+typedef struct  _ATOM_CONNECTOR_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usMaxPixClk;
+  UCHAR                       ucFlowCntlGpioId;
+  UCHAR                       ucSwapCntlGpioId;
+  UCHAR                       ucConnectedDvoBundle;
+  UCHAR                       ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+   ATOM_DTD_FORMAT                     asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;                //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+  UCHAR                       ucSubConnectorType;     //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+  UCHAR                       ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+   ATOM_COMMON_RECORD_HEADER   sheader;
+   UCHAR                                    ucMuxType;                     //decides the number of ucMuxState: =0, no pin state; =1, single state with complement; >1, multiple states
+   UCHAR                                    ucMuxControlPin;
+   UCHAR                                    ucMuxState[2];               //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+   ATOM_COMMON_RECORD_HEADER   sheader;
+   UCHAR                                    ucMuxType;
+   UCHAR                                    ucMuxControlPin;
+   UCHAR                                    ucMuxState[2];               //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK                        0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT      0x01
+
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD     //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES];  //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD  //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES];  //A fixed-size array which maps external pins to internal DDC IDs
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usObjectID;         //could be connector, encoder or other object in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_LAYOUT_INFO
+{
+   USHORT usConnectorObjectId;
+   UCHAR  ucConnectorType;
+   UCHAR  ucPosition;
+}ATOM_CONNECTOR_LAYOUT_INFO;
+
+// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
+#define CONNECTOR_TYPE_DVI_D                 1
+#define CONNECTOR_TYPE_DVI_I                 2
+#define CONNECTOR_TYPE_VGA                   3
+#define CONNECTOR_TYPE_HDMI                  4
+#define CONNECTOR_TYPE_DISPLAY_PORT          5
+#define CONNECTOR_TYPE_MINI_DISPLAY_PORT     6
+
+typedef struct  _ATOM_BRACKET_LAYOUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucLength;
+  UCHAR                       ucWidth;
+  UCHAR                       ucConnNum;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_LAYOUT_INFO  asConnInfo[1];
+}ATOM_BRACKET_LAYOUT_RECORD;
+
+
+/****************************************************************************/
+// Structure used in XXXX
+/****************************************************************************/
+typedef struct  _ATOM_VOLTAGE_INFO_HEADER
+{
+   USHORT   usVDDCBaseLevel;                //In number of 50mv unit
+   USHORT   usReserved;                     //For possible extension table offset
+   UCHAR    ucNumOfVoltageEntries;
+   UCHAR    ucBytesPerVoltageEntry;
+   UCHAR    ucVoltageStep;                  //Indicates how many mv one step is, in 0.5mv units
+   UCHAR    ucDefaultVoltageEntry;
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct  _ATOM_VOLTAGE_INFO
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   ATOM_VOLTAGE_INFO_HEADER viHeader;
+   UCHAR    ucVoltageEntries[64];            //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct  _ATOM_VOLTAGE_FORMULA
+{
+   USHORT   usVoltageBaseLevel;             // In number of 1mv unit
+   USHORT   usVoltageStep;                  // Indicates how many mv one step is, in 1mv units
+   UCHAR    ucNumOfVoltageEntries;          // Number of Voltage Entry, which indicate max Voltage
+   UCHAR    ucFlag;                         // bit0=0 :step is 1mv =1 0.5mv
+   UCHAR    ucBaseVID;                      // if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+   UCHAR    ucReserved;
+   UCHAR    ucVIDAdjustEntries[32];         // 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
+
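+// Illustrative sketch only (not part of the VBIOS table definitions): how the
+// ucBaseVID formula above could be applied when no lookup table is present.
+// The helper name and the 1mV-step assumption (ucFlag bit0 == 0) are ours.
+static inline UCHAR atom_formula_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *pFormula,
+                                                USHORT usVolInMv)
+{
+   // VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep, all values in 1mV units
+   return (UCHAR)(pFormula->ucBaseVID +
+                  (usVolInMv - pFormula->usVoltageBaseLevel) / pFormula->usVoltageStep);
+}
+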
+typedef struct  _VOLTAGE_LUT_ENTRY
+{
+    USHORT     usVoltageCode;               // The Voltage ID, either GPIO or I2C code
+    USHORT     usVoltageValue;              // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct  _ATOM_VOLTAGE_FORMULA_V2
+{
+    UCHAR      ucNumOfVoltageEntries;               // Number of Voltage Entry, which indicate max Voltage
+    UCHAR      ucReserved[3];
+    VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+  UCHAR    ucVoltageControlId;                     //Indicate it is controlled by I2C or GPIO or HW state machine
+  UCHAR    ucVoltageControlI2cLine;
+  UCHAR    ucVoltageControlAddress;
+  UCHAR    ucVoltageControlOffset;
+  USHORT   usGpioPin_AIndex;                       //GPIO_PAD register index
+  UCHAR    ucGpioPinBitShift[9];                   //at most 8 pins support 255 VIDs, terminate with 0xff
+  UCHAR    ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
+#define VOLTAGE_CONTROLLED_BY_HW              0x00
+#define VOLTAGE_CONTROLLED_BY_I2C_MASK        0x7F
+#define VOLTAGE_CONTROLLED_BY_GPIO            0x80
+#define VOLTAGE_CONTROL_ID_LM64               0x01                           //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC                0x02                           //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM            0x03                           //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402             0x04
+#define VOLTAGE_CONTROL_ID_UP6266             0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO            0x06
+#define VOLTAGE_CONTROL_ID_VT1556M            0x07
+#define VOLTAGE_CONTROL_ID_CHL822x            0x08
+#define VOLTAGE_CONTROL_ID_VT1586M            0x09
+#define VOLTAGE_CONTROL_ID_UP1637             0x0A
+#define VOLTAGE_CONTROL_ID_CHL8214            0x0B
+#define VOLTAGE_CONTROL_ID_UP1801             0x0C
+#define VOLTAGE_CONTROL_ID_ST6788A            0x0D
+#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2      0x0E
+#define VOLTAGE_CONTROL_ID_AD527x      	      0x0F
+#define VOLTAGE_CONTROL_ID_NCP81022    	      0x10
+#define VOLTAGE_CONTROL_ID_LTC2635			  0x11
+#define VOLTAGE_CONTROL_ID_NCP4208	          0x12
+#define VOLTAGE_CONTROL_ID_IR35xx             0x13
+#define VOLTAGE_CONTROL_ID_RT9403	          0x14
+
+#define VOLTAGE_CONTROL_ID_GENERIC_I2C        0x40
+
+typedef struct  _ATOM_VOLTAGE_OBJECT
+{
+   UCHAR      ucVoltageType;                           //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR      ucSize;                                       //Size of Object
+   ATOM_VOLTAGE_CONTROL         asControl;         //describes how to control
+   ATOM_VOLTAGE_FORMULA         asFormula;         //Indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_V2
+{
+    UCHAR ucVoltageType;                      //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+    UCHAR ucSize;                             //Size of Object
+    ATOM_VOLTAGE_CONTROL    asControl;        //describes how to control
+    ATOM_VOLTAGE_FORMULA_V2 asFormula;        //Indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   ATOM_VOLTAGE_OBJECT        asVoltageObj[3];   //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+    ATOM_VOLTAGE_OBJECT_V2    asVoltageObj[3];   //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct  _ATOM_LEAKID_VOLTAGE
+{
+   UCHAR    ucLeakageId;
+   UCHAR    ucReserved;
+   USHORT   usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+   UCHAR    ucVoltageType;                            //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucVoltageMode;                            //Indicate voltage control mode: Init/Set/Leakage/Set phase
+   USHORT   usSize;                                   //Size of Object
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
+#define VOLTAGE_OBJ_GPIO_LUT                 0        //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ          3        //VOLTAGE REGULATOR INIT sequence through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_PHASE_LUT                4        //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_SVID2                    7        //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_EVV                      8
+#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT     0x10     //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT   0x11     //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT  0x12     //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+
+typedef struct  _VOLTAGE_LUT_ENTRY_V2
+{
+  ULONG   ulVoltageId;                       // The Voltage ID which is used to program GPIO register
+  USHORT  usVoltageValue;                    // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+  USHORT  usVoltageLevel;                    // The Voltage ID which is used to program GPIO register
+  USHORT  usVoltageId;
+  USHORT  usLeakageId;                       // The corresponding Voltage Value, in mV
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+
+typedef struct  _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
+   UCHAR  ucVoltageRegulatorId;              //Indicate Voltage Regulator Id
+   UCHAR  ucVoltageControlI2cLine;
+   UCHAR  ucVoltageControlAddress;
+   UCHAR  ucVoltageControlOffset;
+   UCHAR  ucVoltageControlFlag;              // Bit0: 0 - One byte data; 1 - Two byte data
+   UCHAR  ulReserved[3];
+   VOLTAGE_LUT_ENTRY asVolI2cLut[1];         // end with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
+
+// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
+#define VOLTAGE_DATA_ONE_BYTE                0
+#define VOLTAGE_DATA_TWO_BYTE                1
+
+typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
+   UCHAR  ucVoltageGpioCntlId;               // default is 0 which indicate control through CG VID mode
+   UCHAR  ucGpioEntryNum;                    // indicates the number of entries in the Voltage/GPIO value lookup table
+   UCHAR  ucPhaseDelay;                      // phase delay in units of microseconds
+   UCHAR  ucReserved;
+   ULONG  ulGpioMaskVal;                     // GPIO Mask value
+   VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
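+
+// Illustrative sketch only (not part of the VBIOS table definitions): looking
+// up the GPIO register value for a requested voltage level in asVolGpioLut.
+// The helper name is ours; le16_to_cpu/le32_to_cpu are assumed to come from
+// the kernel's byteorder helpers.
+static inline int atom_gpio_voltage_lookup(const ATOM_GPIO_VOLTAGE_OBJECT_V3 *pObj,
+                                           USHORT usLevelInMv, ULONG *pulGpioVal)
+{
+   UCHAR i;
+
+   for (i = 0; i < pObj->ucGpioEntryNum; i++) {
+      if (le16_to_cpu(pObj->asVolGpioLut[i].usVoltageValue) == usLevelInMv) {
+         // only the bits covered by ulGpioMaskVal are meaningful
+         *pulGpioVal = le32_to_cpu(pObj->asVolGpioLut[i].ulVoltageId) &
+                       le32_to_cpu(pObj->ulGpioMaskVal);
+         return 0;
+      }
+   }
+   return -1;   // voltage level not present in the lookup table
+}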
+
+typedef struct  _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = 0x10/0x11/0x12
+   UCHAR    ucLeakageCntlId;                 // default is 0
+   UCHAR    ucLeakageEntryNum;               // indicate the entry number of LeakageId/Voltage Lut table
+   UCHAR    ucReserved[2];
+   ULONG    ulMaxVoltageLevel;
+   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
+
+
+typedef struct  _ATOM_SVID2_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;    // voltage mode = VOLTAGE_OBJ_SVID2
+// 14:7 - PSI0_VID
+// 6    - PSI0_EN
+// 5    - PSI1
+// 4:2  - load line slope trim.
+// 1:0  - offset trim,
+   USHORT   usLoadLine_PSI;
+// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
+   UCHAR    ucSVDGpioId;     //0~31 indicate GPIO0~31
+   UCHAR    ucSVCGpioId;     //0~31 indicate GPIO0~31
+   ULONG    ulReserved;
+}ATOM_SVID2_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+  ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+  ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+  ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+  ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ATOM_VOLTAGE_OBJECT_V3     asVoltageObj[3];   //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
+
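+// Illustrative sketch only (not part of the VBIOS table definitions): the V3
+// voltage objects are variable sized, so a driver typically walks them by
+// usSize until it finds a matching type/mode.  The helper name is ours;
+// le16_to_cpu and the usual usStructureSize field of ATOM_COMMON_TABLE_HEADER
+// are assumed from the kernel environment.
+static inline ATOM_VOLTAGE_OBJECT_V3 *
+atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *pInfo,
+                              UCHAR ucVoltageType, UCHAR ucVoltageMode)
+{
+   UCHAR *pStart = (UCHAR *)pInfo->asVoltageObj;
+   UCHAR *pEnd   = (UCHAR *)pInfo + le16_to_cpu(pInfo->sHeader.usStructureSize);
+
+   while (pStart < pEnd) {
+      ATOM_VOLTAGE_OBJECT_V3 *pObj = (ATOM_VOLTAGE_OBJECT_V3 *)pStart;
+      ATOM_VOLTAGE_OBJECT_HEADER_V3 *pHdr = &pObj->asGpioVoltageObj.sHeader;
+
+      if (pHdr->ucVoltageType == ucVoltageType &&
+          pHdr->ucVoltageMode == ucVoltageMode)
+         return pObj;
+
+      pStart += le16_to_cpu(pHdr->usSize);   // advance past this variable-sized object
+   }
+   return NULL;                              // no matching voltage object in the table
+}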
+
+typedef struct  _ATOM_ASIC_PROFILE_VOLTAGE
+{
+   UCHAR    ucProfileId;
+   UCHAR    ucReserved;
+   USHORT   usSize;
+   USHORT   usEfuseSpareStartAddr;
+   USHORT   usFuseIndex[8];                                    //from LSB to MSB, max 8 bits; terminated with 0xffff if fewer than 8 efuse IDs
+   ATOM_LEAKID_VOLTAGE               asLeakVol[2];         //LeakageId and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define   ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE                     1
+#define   ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE         1
+#define   ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE             2
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ATOM_ASIC_PROFILE_VOLTAGE        asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  UCHAR  ucLeakageBinNum;                // indicate the entry number of LeakageId/Voltage Lut table
+  USHORT usLeakageBinArrayOffset;        // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher)
+
+  UCHAR  ucElbVDDC_Num;
+  USHORT usElbVDDC_IdArrayOffset;        // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 )
+  USHORT usElbVDDC_LevelArrayOffset;     // offset of 2 dimension voltage level USHORT array
+
+  UCHAR  ucElbVDDCI_Num;
+  USHORT usElbVDDCI_IdArrayOffset;       // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 )
+  USHORT usElbVDDCI_LevelArrayOffset;    // offset of 2 dimension voltage level USHORT array
+}ATOM_ASIC_PROFILING_INFO_V2_1;
+
+
+//Parameters to convert an efuse value to a measured value
+//Measured = LN((2^Bitsize-1)/EFUSE-1)*(Range)/(-alpha)+(Max+Min)/2
+typedef struct _EFUSE_LOGISTIC_FUNC_PARAM
+{
+  USHORT usEfuseIndex;                  // Efuse Index in DWORD address, for example Index 911, usEfuseIndex=112
+  UCHAR  ucEfuseBitLSB;                 // Efuse bit LSB in DWORD address, for example Index 911, usEfuseBitLSB= 911-112*8=15
+  UCHAR  ucEfuseLength;                 // Efuse bits length,
+  ULONG  ulEfuseEncodeRange;            // Range = Max - Min, bit31 indicate the efuse is negative number
+  ULONG  ulEfuseEncodeAverage;          // Average = ( Max + Min )/2
+}EFUSE_LOGISTIC_FUNC_PARAM;
+
+//Linear Function: Measured = Round ( Efuse * ( Max-Min )/(2^BitSize -1 ) + Min )
+typedef struct _EFUSE_LINEAR_FUNC_PARAM
+{
+  USHORT usEfuseIndex;                  // Efuse Index in DWORD address, for example Index 911, usEfuseIndex=112
+  UCHAR  ucEfuseBitLSB;                 // Efuse bit LSB in DWORD address, for example Index 911, usEfuseBitLSB= 911-112*8=15
+  UCHAR  ucEfuseLength;                 // Efuse bits length,
+  ULONG  ulEfuseEncodeRange;            // Range = Max - Min, bit31 indicate the efuse is negative number
+  ULONG  ulEfuseMin;                    // Min
+}EFUSE_LINEAR_FUNC_PARAM;
+
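+// Illustrative sketch only (not part of the VBIOS table definitions): decoding
+// an efuse field with the linear function above.  The helper name is ours, the
+// raw efuse value is assumed to have been read elsewhere, le32_to_cpu is the
+// kernel's byteorder helper, and the bit31 "negative number" flag of
+// ulEfuseEncodeRange is ignored for brevity.
+static inline ULONG atom_efuse_linear_decode(const EFUSE_LINEAR_FUNC_PARAM *pParam,
+                                             ULONG ulRawEfuse)
+{
+   ULONG ulMaxCode = (1UL << pParam->ucEfuseLength) - 1;   // 2^BitSize - 1
+   unsigned long long ullScaled =
+      (unsigned long long)ulRawEfuse * le32_to_cpu(pParam->ulEfuseEncodeRange);
+
+   // Measured = Round( Efuse * ( Max - Min ) / ( 2^BitSize - 1 ) + Min );
+   // truncating division is used here instead of rounding to keep the sketch short.
+   return (ULONG)(ullScaled / ulMaxCode) + le32_to_cpu(pParam->ulEfuseMin);
+}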
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_1
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulEvvDerateTdp;
+  ULONG  ulEvvDerateTdc;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  EFUSE_LINEAR_FUNC_PARAM sCACm;
+  EFUSE_LINEAR_FUNC_PARAM sCACb;
+  EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usCurrentDpm0;
+  USHORT usPowerDpm1;
+  USHORT usCurrentDpm1;
+  USHORT usPowerDpm2;
+  USHORT usCurrentDpm2;
+  USHORT usPowerDpm3;
+  USHORT usCurrentDpm3;
+  USHORT usPowerDpm4;
+  USHORT usCurrentDpm4;
+  USHORT usPowerDpm5;
+  USHORT usCurrentDpm5;
+  USHORT usPowerDpm6;
+  USHORT usCurrentDpm6;
+  USHORT usPowerDpm7;
+  USHORT usCurrentDpm7;
+}ATOM_ASIC_PROFILING_INFO_V3_1;
+
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_2
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulEvvLkgFactor;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  EFUSE_LINEAR_FUNC_PARAM sCACm;
+  EFUSE_LINEAR_FUNC_PARAM sCACb;
+  EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usPowerDpm1;
+  USHORT usPowerDpm2;
+  USHORT usPowerDpm3;
+  USHORT usPowerDpm4;
+  USHORT usPowerDpm5;
+  USHORT usPowerDpm6;
+  USHORT usPowerDpm7;
+  ULONG  ulTdpDerateDPM0;
+  ULONG  ulTdpDerateDPM1;
+  ULONG  ulTdpDerateDPM2;
+  ULONG  ulTdpDerateDPM3;
+  ULONG  ulTdpDerateDPM4;
+  ULONG  ulTdpDerateDPM5;
+  ULONG  ulTdpDerateDPM6;
+  ULONG  ulTdpDerateDPM7;
+}ATOM_ASIC_PROFILING_INFO_V3_2;
+
+
+// for Tonga/Fiji speed EVV algorithm
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_3
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulEvvLkgFactor;
+  ULONG  ulBoardCoreTemp;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulLeakageTemp;
+  ULONG  ulLeakageVoltage;
+  EFUSE_LINEAR_FUNC_PARAM sCACm;
+  EFUSE_LINEAR_FUNC_PARAM sCACb;
+  EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
+  EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  ULONG  ulEfuseLogisticAlpha;
+  USHORT usPowerDpm0;
+  USHORT usPowerDpm1;
+  USHORT usPowerDpm2;
+  USHORT usPowerDpm3;
+  USHORT usPowerDpm4;
+  USHORT usPowerDpm5;
+  USHORT usPowerDpm6;
+  USHORT usPowerDpm7;
+  ULONG  ulTdpDerateDPM0;
+  ULONG  ulTdpDerateDPM1;
+  ULONG  ulTdpDerateDPM2;
+  ULONG  ulTdpDerateDPM3;
+  ULONG  ulTdpDerateDPM4;
+  ULONG  ulTdpDerateDPM5;
+  ULONG  ulTdpDerateDPM6;
+  ULONG  ulTdpDerateDPM7;
+  EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+  ULONG  ulRoAlpha;
+  ULONG  ulRoBeta;
+  ULONG  ulRoGamma;
+  ULONG  ulRoEpsilon;
+  ULONG  ulATermRo;
+  ULONG  ulBTermRo;
+  ULONG  ulCTermRo;
+  ULONG  ulSclkMargin;
+  ULONG  ulFmaxPercent;
+  ULONG  ulCRPercent;
+  ULONG  ulSFmaxPercent;
+  ULONG  ulSCRPercent;
+  ULONG  ulSDCMargine;
+}ATOM_ASIC_PROFILING_INFO_V3_3;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+   UCHAR  ucPwrSrcId;                                   // Power source
+   UCHAR  ucPwrSensorType;                              // GPIO, I2C or none
+   UCHAR  ucPwrSensId;                                  // if GPIO detect, it is GPIO id,  if I2C detect, it is I2C id
+   UCHAR  ucPwrSensSlaveAddr;                           // Slave address if I2C detect
+   UCHAR  ucPwrSensRegIndex;                            // I2C register Index if I2C detect
+   UCHAR  ucPwrSensRegBitMask;                          // detect which bit is used if I2C detect
+   UCHAR  ucPwrSensActiveState;                         // high active or low active
+   UCHAR  ucReserve[3];                                 // reserve
+   USHORT usSensPwr;                                    // in unit of watt
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+      ATOM_COMMON_TABLE_HEADER      asHeader;
+      UCHAR                                    asPwrbehave[16];
+      ATOM_POWER_SOURCE_OBJECT      asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
+#define POWERSOURCE_PCIE_ID1                  0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1   0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1   0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2   0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2   0x08
+
+//define ucPwrSensorId
+#define POWER_SENSOR_ALWAYS                     0x00
+#define POWER_SENSOR_GPIO                        0x01
+#define POWER_SENSOR_I2C                        0x02
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY_V2
+{
+  USHORT     usVoltageLevel;                      // The real Voltage Level round up value in unit of mv,
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY_V2;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
+  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK
+  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];
+  ULONG  ulCSR_M3_ARB_CNTL_UVD[10];
+  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG;
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  ULONG  ulReserved3[15];
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
+
+//ucLVDSMisc:
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE                                             0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP                                                0x02
+#define SYS_INFO_LVDSMISC__888_BPC                                                   0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN                                               0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW                                           0x10
+// new since Trinity
+#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN                               0x20
+
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW                                          0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW                                          0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Llano/Ontario projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:                 Other display related flags, not defined yet.
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[3]=0: Enable HW AUX mode detection logic
+                                        =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
+                                  VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
+                                  Changing BL using VBIOS function is functional in both driver and non-driver present environment;
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
+                                  that BL control from GPU is expected.
+                                  VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+                                  Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
+                                  it's per platform
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State
+usBootUpNBVoltage:                Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:                 System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays to provide available list of SCLK and corresponding voltage, ordered from low to high
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting.
+usDVISSPercentage:                DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock LVDS panel single link, if=0 means VBIOS use default threshold, right now it is 85MHz
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1:888 bit per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+**********************************************************************************************************************/
+
+// this Table is used for Liano/Ontario APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V6    sIntegratedSysInfo;
+  ULONG  ulPowerplayTable[128];
+}ATOM_FUSION_SYSTEM_INFO_V1;
+
+
+typedef struct _ATOM_TDP_CONFIG_BITS
+{
+#if ATOM_BIG_ENDIAN
+  ULONG   uReserved:2;
+  ULONG   uTDP_Value:14;  // Original TDP value in tens of milli watts
+  ULONG   uCTDP_Value:14; // Override value in tens of milli watts
+  ULONG   uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
+#else
+  ULONG   uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
+  ULONG   uCTDP_Value:14; // Override value in tens of milli watts
+  ULONG   uTDP_Value:14;  // Original TDP value in tens of milli watts
+  ULONG   uReserved:2;
+#endif
+}ATOM_TDP_CONFIG_BITS;
+
+typedef union _ATOM_TDP_CONFIG
+{
+  ATOM_TDP_CONFIG_BITS TDP_config;
+  ULONG            TDP_config_all;
+}ATOM_TDP_CONFIG;
+
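+// Illustrative sketch only (not part of the VBIOS table definitions): picking
+// the effective TDP limit from the packed configuration above.  The helper
+// name is ours.
+static inline ULONG atom_effective_tdp_in_10mw(ATOM_TDP_CONFIG tdp)
+{
+   // uCTDP_Enable is 2 when the override raises the TDP, 1 when it lowers it,
+   // and 0 when no cTDP override is in effect (see the bit-field comment above).
+   if (tdp.TDP_config.uCTDP_Enable)
+      return tdp.TDP_config.uCTDP_Value;
+   return tdp.TDP_config.uTDP_Value;
+}
+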
+/**********************************************************************************************************************
+  ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo:               refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]:            These 512 bytes are used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
+**********************************************************************************************************************/
+
+// this IntegratedSystemInfoTable is used for Trinity APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG  asTdpConfig;
+  ULONG  ulReserved[19];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG;
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  USHORT usNBP2Voltage;
+  USHORT usNBP3Voltage;
+  ULONG  ulNbpStateNClkFreq[4];
+  UCHAR  ucNBDPMEnable;
+  UCHAR  ucReserved[3];
+  UCHAR  ucDPMState0VclkFid;
+  UCHAR  ucDPMState0DclkFid;
+  UCHAR  ucDPMState1VclkFid;
+  UCHAR  ucDPMState1DclkFid;
+  UCHAR  ucDPMState2VclkFid;
+  UCHAR  ucDPMState2DclkFid;
+  UCHAR  ucDPMState3VclkFid;
+  UCHAR  ucDPMState3DclkFid;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT            0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT  0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT       0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT                         0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE                0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE                               0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT                         0x08
+#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS                               0x10
+//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
+#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE                         0x00010000
+
+//ulGPUCapInfo[17]=1 indicates the battery boost feature is enabled, from ML
+#define SYS_INFO_GPUCAPS__BATTERY_BOOST_ENABLE                            0x00020000
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Trinity projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:                 bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
+                                        =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+                                  bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if the LCD panel is connected and the lid is open)
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
+                                        =1: DP mode use single PLL mode
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
+                                  VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
+                                  Changing BL using VBIOS function is functional in both driver and non-driver present environment;
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
+                                  that BL control from GPU is expected.
+                                  VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+                                  Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
+                                  it's per platform
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State
+usNBP2Voltage:                    VID for voltage on NB P2 State
+usNBP3Voltage:                    VID for voltage on NB P3 State
+usBootUpNBVoltage:                Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:                 System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays to provide available list of SCLK and corresponding voltage, ordered from low to high
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting.
+usDVISSPercentage:                DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%,  =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in unit of 10Hz,  =0, use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock LVDS panel single link, if=0 means VBIOS use default threshold, right now it is 85MHz
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1:888 bit per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+                                  [bit5] Travis LVDS output voltage override enable, when =1, use ucTravisLVDSVolAdjust value to overwrite Travis register LVDS_CTRL_4
+ucTravisLVDSVolAdjust             When ucLVDSMisc[5]=1,it means platform SBIOS want to overwrite TravisLVDSVoltage. Then VBIOS will use ucTravisLVDSVolAdjust
+                                  value to program Travis register LVDS_CTRL_4
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:    LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
+                                  =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnDEtoVARY_BL_in4Ms:     LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
+                                  =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffVARY_BLtoDE_in4Ms:    LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
+                                  =0 mean use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as following: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffDEtoDIGON_in4Ms:      LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
+                                  =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as following: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms:         LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
+                                  =0 means to use VBIOS default delay which is 125 ( 500ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
+                                  LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
+                                  LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucMinAllowedBL_Level:             Lowest LCD backlight PWM level. This is customer platform specific parameters. By default it is 0.
+
+ulNbpStateMemclkFreq[4]:          system memory clock frequency in units of 10kHz in different NB pstates.
+
+**********************************************************************************************************************/
+
+// this IntegratedSystemInfoTable is used for Kaveri & Kabini APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulVBIOSMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulDISP_CLK2Freq;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulReserved2;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  ULONG  ulReserved3;
+  USHORT usGPUReservedSysMemSize;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG  asTdpConfig;
+  ULONG  ulReserved[19];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulReserved4;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulGPUReservedSysMemBaseAddrLo;
+  ULONG  ulGPUReservedSysMemBaseAddrHi;
+  ATOM_CLK_VOLT_CAPABILITY   s5thDISPCLK_Voltage;
+  ULONG  ulReserved5;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  ULONG  ulPSPVersion;
+  ULONG  ulNbpStateNClkFreq[4];
+  USHORT usNBPStateVoltage[4];
+  USHORT usBootUpNBVoltage;
+  USHORT usReserved2;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
+ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit.
+sDISPCLK_Voltage:                 Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
+
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Trinity projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+
+ulVBIOSMisc:                       Miscellaneous flags for VBIOS requirement and interface
+                                  bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
+                                        =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
+                                  bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if the LCD panel is connected and the lid is open)
+
+ulGPUCapInfo:                     bit[0~2]= Reserved
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+                                  bit[4]=0: Disable DFS bypass feature
+                                        =1: Enable DFS bypass feature
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
+                                  VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
+                                  Changing BL using VBIOS function is functional in both driver and non-driver present environment;
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
+                                  that BL control from GPU is expected.
+                                  VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+                                  Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
+                                  it's per platform
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
+
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.
+                                  Bit[3]=0: GNB DPM is disabled
+                                        =1: GNB DPM is enabled
+ulCPUCapInfo:                     TBD
+
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved.
+ucUMAChannelNumber:                 System memory channel numbers.
+
+strVBIOSMsg[40]:                  VBIOS boot up customized message string
+
+sAvail_SCLK[5]:                   Arrays to provide available list of SCLK and corresponding voltage, ordered from low to high
+
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel (not including eDP) Spread Spectrum Percentage in units of 0.01%, =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel (not including eDP) Spread Spectrum frequency in units of 10Hz, =0, use VBIOS default setting.
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%,  =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz,  =0, use VBIOS default setting.
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%,  =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz,  =0, use VBIOS default setting.
+
+usGPUReservedSysMemSize:          Reserved system memory size for ACP engine in APU GNB, units in MB. 0/2/4MB based on CMOS options, current default could be 0MB. KV only, not on KB.
+ulGPUReservedSysMemBaseAddrLo:    Low 32 bits base address to the reserved system memory.
+ulGPUReservedSysMemBaseAddrHi:    High 32 bits base address to the reserved system memory.
+
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single-link mode; =0 means VBIOS uses the default threshold, currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1:888 bit per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+                                  [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
+ucTravisLVDSVolAdjust             When ucLVDSMisc[5]=1, the platform SBIOS wants to override TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
+                                  value to program the Travis register LVDS_CTRL_4.
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
+                                  LVDS power up sequence time in units of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
+                                  =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+                                  (A unit-conversion sketch follows the ATOM_FUSION_SYSTEM_INFO_V2 definition below.)
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnDEtoVARY_BL_in4Ms:
+                                  LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
+                                  =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOffVARY_BLtoDE_in4Ms:
+                                  LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
+                                  =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOffDEtoDIGON_in4Ms:
+                                   LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
+                                  =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSOffToOnDelay_in4Ms:
+                                  LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
+                                  =0 means to use VBIOS default delay which is 125 ( 500ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
+                                  LVDS power up sequence time in units of 4ms. Time delay from VARY_BL signal on to BLON signal active.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
+                                  LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucMinAllowedBL_Level:             Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
+
+ulLCDBitDepthControlVal:          GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
+
+ulNbpStateMemclkFreq[4]:          System memory clock frequency in units of 10kHz in the different NB P-States (P0, P1, P2 & P3).
+ulNbpStateNClkFreq[4]:            NB P-State NClk frequency in the different NB P-States.
+usNBPStateVoltage[4]:             NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage.
+usBootUpNBVoltage:                NB P-State voltage during boot up before driver loaded
+sExtDispConnInfo:                 Display connector information table provided to VBIOS
+
+**********************************************************************************************************************/
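+
+// Illustrative sketch (not part of the ATOM tables): one way a driver might
+// interpret the ucHtcTmpLmt/ucHtcHystLmt thresholds and the ulSystemConfig
+// bits documented in the comment block above. The helper names below are
+// hypothetical and assume only the bit layout described above.
+static inline UCHAR example_htc_exit_threshold(UCHAR uc_htc_tmp_lmt, UCHAR uc_htc_hyst_lmt)
+{
+  /* HTC_active is exited at (threshold-on value) - (hysteresis) */
+  return (UCHAR)(uc_htc_tmp_lmt - uc_htc_hyst_lmt);
+}
+
+static inline int example_pcie_power_gating_enabled(ULONG ul_system_config)
+{
+  return (ul_system_config & (1 << 0)) != 0;   /* ulSystemConfig Bit[0] */
+}
+
+static inline int example_gnb_dpm_enabled(ULONG ul_system_config)
+{
+  return (ul_system_config & (1 << 3)) != 0;   /* ulSystemConfig Bit[3] */
+}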
+
+// this Table is used for Kaveri/Kabini APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_8    sIntegratedSysInfo;       // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
+  ULONG                               ulPowerplayTable[128];    // Update comments here to link new powerplay table definition structure
+}ATOM_FUSION_SYSTEM_INFO_V2;
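+
+// Illustrative sketch (not part of the ATOM tables): the ucLVDSPwr*_in4Ms
+// fields documented above are expressed in 4 ms units, with 0 meaning "use
+// the VBIOS default". The helper below (hypothetical name) shows the
+// conversion; the caller supplies the documented default for the field.
+static inline USHORT example_lvds_delay_to_ms(UCHAR delay_in_4ms, UCHAR default_in_4ms)
+{
+  UCHAR units = delay_in_4ms ? delay_in_4ms : default_in_4ms;  /* e.g. 8 -> 32 ms */
+  return (USHORT)(units * 4);
+}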
+
+
+typedef struct _ATOM_I2C_REG_INFO
+{
+  UCHAR ucI2cRegIndex;
+  UCHAR ucI2cRegVal;
+}ATOM_I2C_REG_INFO;
+
+// this IntegrateSystemInfoTable is used for Carrizo
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];       // no longer used, keep it as is to avoid driver compiling error
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulVBIOSMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulDISP_CLK2Freq;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulReserved2;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  ULONG  ulReserved3;
+  USHORT usGPUReservedSysMemSize;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG  asTdpConfig;
+  UCHAR  ucExtHDMIReDrvSlvAddr;
+  UCHAR  ucExtHDMIReDrvRegNum;
+  ATOM_I2C_REG_INFO asExtHDMIRegSetting[9];
+  ULONG  ulReserved[2];
+  ATOM_CLK_VOLT_CAPABILITY_V2   sDispClkVoltageMapping[8];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];            // no longer used, keep it as is to avoid driver compiling error
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulReserved4;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulGPUReservedSysMemBaseAddrLo;
+  ULONG  ulGPUReservedSysMemBaseAddrHi;
+  ULONG  ulReserved5[3];
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];          // only 2 levels are changed.
+  ULONG  ulPSPVersion;
+  ULONG  ulNbpStateNClkFreq[4];
+  USHORT usNBPStateVoltage[4];
+  USHORT usBootUpNBVoltage;
+  UCHAR  ucEDPv1_4VSMode;
+  UCHAR  ucReserved2;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_9;
+
+
+// definition for ucEDPv1_4VSMode
+#define EDP_VS_LEGACY_MODE                  0
+#define EDP_VS_LOW_VDIFF_MODE               1
+#define EDP_VS_HIGH_VDIFF_MODE              2
+#define EDP_VS_STRETCH_MODE                 3
+#define EDP_VS_SINGLE_VDIFF_MODE            4
+#define EDP_VS_VARIABLE_PREM_MODE           5
+
+
+// this IntegrateSystemInfoTable is used for Carrizo
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ULONG  ulReserved0[8];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulVBIOSMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulReserved1;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulReserved2;
+  ULONG  ulSystemConfig;
+  ULONG  ulCPUCapInfo;
+  ULONG  ulReserved3;
+  USHORT usGPUReservedSysMemSize;
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;
+  UCHAR  ucMemoryType;
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ATOM_TDP_CONFIG  asTdpConfig;
+  ULONG  ulReserved[7];
+  ATOM_CLK_VOLT_CAPABILITY_V2   sDispClkVoltageMapping[8];
+  ULONG  ulReserved6[10];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulReserved4;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulGPUReservedSysMemBaseAddrLo;
+  ULONG  ulGPUReservedSysMemBaseAddrHi;
+  ULONG  ulReserved5[3];
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucTravisLVDSVolAdjust;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucMinAllowedBL_Level;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[2];
+  ULONG  ulReserved7[2];
+  ULONG  ulPSPVersion;
+  ULONG  ulNbpStateNClkFreq[4];
+  USHORT usNBPStateVoltage[4];
+  USHORT usBootUpNBVoltage;
+  UCHAR  ucEDPv1_4VSMode;
+  UCHAR  ucReserved2;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_10;
+
+/**************************************************************************/
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
+#define ICS91719  1
+#define ICS91720  2
+
+//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+  UCHAR         ucNunberOfBytes;                                              //Indicates how many bytes SW needs to write to the external ASIC for one block, in addition to "Start" and "Stop"
+  UCHAR         ucI2CData[1];                                                 //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+  ATOM_I2C_ID_CONFIG_ACCESS       sucI2cId;               //I2C line and HW/SW assisted cap.
+  UCHAR                              ucSSChipID;             //SS chip being used
+  UCHAR                              ucSSChipSlaveAddr;      //Slave Address to set up this SS chip
+  UCHAR                           ucNumOfI2CDataRecords;  //number of data block
+  ATOM_I2C_DATA_RECORD            asI2CData[1];
+}ATOM_I2C_DEVICE_SETUP_INFO;
+
+//==========================================================================================
+typedef struct  _ATOM_ASIC_MVDD_INFO
+{
+  ATOM_COMMON_TABLE_HEADER         sHeader;
+  ATOM_I2C_DEVICE_SETUP_INFO      asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
+#define ATOM_MCLK_SS_INFO         ATOM_ASIC_MVDD_INFO
+
+//==========================================================================================
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+  ULONG                      ulTargetClockRange;            //Clock Out frequency (VCO), in units of 10kHz
+  USHORT                     usSpreadSpectrumPercentage;    //in units of 0.01%
+  USHORT                     usSpreadRateInKhz;             //in units of kHz, modulation freq
+  UCHAR                      ucClockIndication;             //Indicates which clock source needs SS
+  UCHAR                      ucSpreadSpectrumMode;          //Bit1=0 Down Spread, =1 Center Spread.
+  UCHAR                      ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication. SW uses the IDs below to search whether SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
+#define ASIC_INTERNAL_MEMORY_SS            1
+#define ASIC_INTERNAL_ENGINE_SS            2
+#define ASIC_INTERNAL_UVD_SS             3
+#define ASIC_INTERNAL_SS_ON_TMDS         4
+#define ASIC_INTERNAL_SS_ON_HDMI         5
+#define ASIC_INTERNAL_SS_ON_LVDS         6
+#define ASIC_INTERNAL_SS_ON_DP           7
+#define ASIC_INTERNAL_SS_ON_DCPLL        8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK     9
+#define ASIC_INTERNAL_VCE_SS             10
+#define ASIC_INTERNAL_GPUPLL_SS          11
+
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+  ULONG                      ulTargetClockRange;            //For mem/engine/uvd, Clock Out frequency (VCO), in units of 10kHz
+                                                            //For TMDS/HDMI/LVDS it is the pixel clock; for DP it is the link clock ( 27000 or 16200 )
+  USHORT                     usSpreadSpectrumPercentage;    //in units of 0.01%
+  USHORT                     usSpreadRateIn10Hz;            //in units of 10Hz, modulation freq
+  UCHAR                      ucClockIndication;             //Indicates which clock source needs SS
+  UCHAR                      ucSpreadSpectrumMode;          //Bit0=0 Down Spread, =1 Center Spread; bit1=0: internal SS, bit1=1: external SS
+  UCHAR                      ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+//#define ATOM_INTERNAL_SS_MASK                  0x00000000
+//#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER         sHeader;
+  ATOM_ASIC_SS_ASSIGNMENT            asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER         sHeader;
+  ATOM_ASIC_SS_ASSIGNMENT_V2        asSpreadSpectrum[1];      //this is a pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+  ULONG                      ulTargetClockRange;            //For mem/engine/uvd, Clock Out frequency (VCO), in units of 10kHz
+                                                            //For TMDS/HDMI/LVDS it is the pixel clock; for DP it is the link clock ( 27000 or 16200 )
+  USHORT                     usSpreadSpectrumPercentage;    //in units of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
+  USHORT                     usSpreadRateIn10Hz;            //in units of 10Hz, modulation freq
+  UCHAR                      ucClockIndication;             //Indicates which clock source needs SS
+  UCHAR                      ucSpreadSpectrumMode;          //Bit0=0 Down Spread, =1 Center Spread; bit1=0: internal SS, bit1=1: external SS
+  UCHAR                      ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
+#define SS_MODE_V3_CENTRE_SPREAD_MASK             0x01
+#define SS_MODE_V3_EXTERNAL_SS_MASK               0x02
+#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK    0x10
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER         sHeader;
+  ATOM_ASIC_SS_ASSIGNMENT_V3        asSpreadSpectrum[1];      //this is pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
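+
+// Illustrative sketch (not the driver's actual lookup routine): the
+// asSpreadSpectrum[1] arrays above are variable length, so a table is walked
+// using the size recorded in its common header. The helper below finds the
+// first V3 entry matching a ucClockIndication ID whose ulTargetClockRange
+// covers the requested clock; byte-order handling is omitted for brevity.
+static inline ATOM_ASIC_SS_ASSIGNMENT_V3 *
+example_find_asic_ss_v3(ATOM_ASIC_INTERNAL_SS_INFO_V3 *tbl, UCHAR id, ULONG clk_in_10khz)
+{
+  ULONG num = (tbl->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+              sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+  ULONG i;
+
+  for (i = 0; i < num; i++) {
+    ATOM_ASIC_SS_ASSIGNMENT_V3 *ss = &tbl->asSpreadSpectrum[i];
+    if (ss->ucClockIndication == id && clk_in_10khz <= ss->ulTargetClockRange)
+      return ss;          /* percentage unit depends on SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK */
+  }
+  return 0;               /* no match: SS not required/enabled for this clock */
+}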
+
+
+//==============================Scratch Pad Definition Portion===============================
+#define ATOM_DEVICE_CONNECT_INFO_DEF  0
+#define ATOM_ROM_LOCATION_DEF         1
+#define ATOM_TV_STANDARD_DEF          2
+#define ATOM_ACTIVE_INFO_DEF          3
+#define ATOM_LCD_INFO_DEF             4
+#define ATOM_DOS_REQ_INFO_DEF         5
+#define ATOM_ACC_CHANGE_INFO_DEF      6
+#define ATOM_DOS_MODE_INFO_DEF        7
+#define ATOM_I2C_CHANNEL_STATUS_DEF   8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF  9
+#define ATOM_INTERNAL_TIMER_DEF       10
+
+// BIOS_0_SCRATCH Definition
+#define ATOM_S0_CRT1_MONO               0x00000001L
+#define ATOM_S0_CRT1_COLOR              0x00000002L
+#define ATOM_S0_CRT1_MASK               (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A         0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A            0x00000008L
+#define ATOM_S0_TV1_MASK_A              (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A                    0x00000010L
+#define ATOM_S0_CV_DIN_A                0x00000020L
+#define ATOM_S0_CV_MASK_A               (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+
+#define ATOM_S0_CRT2_MONO               0x00000100L
+#define ATOM_S0_CRT2_COLOR              0x00000200L
+#define ATOM_S0_CRT2_MASK               (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE           0x00000400L
+#define ATOM_S0_TV1_SVIDEO              0x00000800L
+#define ATOM_S0_TV1_SCART               0x00004000L
+#define ATOM_S0_TV1_MASK                (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV                      0x00001000L
+#define ATOM_S0_CV_DIN                  0x00002000L
+#define ATOM_S0_CV_MASK                 (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1                    0x00010000L
+#define ATOM_S0_DFP2                    0x00020000L
+#define ATOM_S0_LCD1                    0x00040000L
+#define ATOM_S0_LCD2                    0x00080000L
+#define ATOM_S0_DFP6                    0x00100000L
+#define ATOM_S0_DFP3                    0x00200000L
+#define ATOM_S0_DFP4                    0x00400000L
+#define ATOM_S0_DFP5                    0x00800000L
+
+
+#define ATOM_S0_DFP_MASK                (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
+
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L // If set, indicates we are running a PCIE asic with
+                                                    // the FAD/HDP reg access bug.  Bit is read by DAL, this is obsolete from RV5xx
+
+#define ATOM_S0_THERMAL_STATE_MASK      0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT     26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
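+
+// Illustrative sketch (not part of the ATOM interface): extracting the
+// thermal-state and system-power-state fields above from a BIOS_0_SCRATCH
+// value read elsewhere. Function names are hypothetical.
+static inline ULONG example_s0_thermal_state(ULONG bios_0_scratch)
+{
+  return (bios_0_scratch & ATOM_S0_THERMAL_STATE_MASK) >> ATOM_S0_THERMAL_STATE_SHIFT;
+}
+
+static inline ULONG example_s0_system_power_state(ULONG bios_0_scratch)
+{
+  /* one of ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC/DC/LITEAC/LIT2AC */
+  return (bios_0_scratch & ATOM_S0_SYSTEM_POWER_STATE_MASK) >> ATOM_S0_SYSTEM_POWER_STATE_SHIFT;
+}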
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S0_CRT1_MONOb0             0x01
+#define ATOM_S0_CRT1_COLORb0            0x02
+#define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0         0x04
+#define ATOM_S0_TV1_SVIDEOb0            0x08
+#define ATOM_S0_TV1_MASKb0              (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0                    0x10
+#define ATOM_S0_CV_DINb0                0x20
+#define ATOM_S0_CV_MASKb0               (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1             0x01
+#define ATOM_S0_CRT2_COLORb1            0x02
+#define ATOM_S0_CRT2_MASKb1             (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1         0x04
+#define ATOM_S0_TV1_SVIDEOb1            0x08
+#define ATOM_S0_TV1_SCARTb1             0x40
+#define ATOM_S0_TV1_MASKb1              (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1                    0x10
+#define ATOM_S0_CV_DINb1                0x20
+#define ATOM_S0_CV_MASKb1               (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2                  0x01
+#define ATOM_S0_DFP2b2                  0x02
+#define ATOM_S0_LCD1b2                  0x04
+#define ATOM_S0_LCD2b2                  0x08
+#define ATOM_S0_DFP6b2                  0x10
+#define ATOM_S0_DFP3b2                  0x20
+#define ATOM_S0_DFP4b2                  0x40
+#define ATOM_S0_DFP5b2                  0x80
+
+
+#define ATOM_S0_THERMAL_STATE_MASKb3    0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3   2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT              18
+
+// BIOS_1_SCRATCH Definition
+#define ATOM_S1_ROM_LOCATION_MASK       0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK        0xFFFF0000L
+
+//   BIOS_2_SCRATCH Definition
+#define ATOM_S2_TV1_STANDARD_MASK       0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK       0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE     0x10000000L
+
+#define ATOM_S2_DEVICE_DPMS_STATE       0x00010000L
+#define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE     0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE    0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE   0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE   0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
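+
+// Illustrative sketch (not part of the ATOM interface): decoding the current
+// backlight level and display rotation from a BIOS_2_SCRATCH value using the
+// masks and shifts above. Function names are hypothetical.
+static inline UCHAR example_s2_backlight_level(ULONG bios_2_scratch)
+{
+  return (UCHAR)((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >> ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+}
+
+static inline ULONG example_s2_rotation(ULONG bios_2_scratch)
+{
+  /* one of ATOM_S2_DISPLAY_ROTATION_{0,90,180,270}_DEGREE */
+  return (bios_2_scratch & ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK) >> ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT;
+}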
+
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_DEVICE_DPMS_STATEb2     0x01
+
+#define ATOM_S2_TMDS_COHERENT_MODEb3    0x10          // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
+
+
+// BIOS_3_SCRATCH Definition
+#define ATOM_S3_CRT1_ACTIVE             0x00000001L
+#define ATOM_S3_LCD1_ACTIVE             0x00000002L
+#define ATOM_S3_TV1_ACTIVE              0x00000004L
+#define ATOM_S3_DFP1_ACTIVE             0x00000008L
+#define ATOM_S3_CRT2_ACTIVE             0x00000010L
+#define ATOM_S3_LCD2_ACTIVE             0x00000020L
+#define ATOM_S3_DFP6_ACTIVE                     0x00000040L
+#define ATOM_S3_DFP2_ACTIVE             0x00000080L
+#define ATOM_S3_CV_ACTIVE               0x00000100L
+#define ATOM_S3_DFP3_ACTIVE                     0x00000200L
+#define ATOM_S3_DFP4_ACTIVE                     0x00000400L
+#define ATOM_S3_DFP5_ACTIVE                     0x00000800L
+
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK      0x00000FFFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE         0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE        0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE        0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE         0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE        0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE        0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE        0x00200000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE        0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE            0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE            0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE            0x08000000L
+
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG    0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
+
+
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S3_CRT1_ACTIVEb0           0x01
+#define ATOM_S3_LCD1_ACTIVEb0           0x02
+#define ATOM_S3_TV1_ACTIVEb0            0x04
+#define ATOM_S3_DFP1_ACTIVEb0           0x08
+#define ATOM_S3_CRT2_ACTIVEb0           0x10
+#define ATOM_S3_LCD2_ACTIVEb0           0x20
+#define ATOM_S3_DFP6_ACTIVEb0           0x40
+#define ATOM_S3_DFP2_ACTIVEb0           0x80
+#define ATOM_S3_CV_ACTIVEb1             0x01
+#define ATOM_S3_DFP3_ACTIVEb1                  0x02
+#define ATOM_S3_DFP4_ACTIVEb1                  0x04
+#define ATOM_S3_DFP5_ACTIVEb1                  0x08
+
+
+#define ATOM_S3_ACTIVE_CRTC1w0          0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2      0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2      0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2       0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2      0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2      0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2      0x20
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2      0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3         0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3         0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3         0x08
+
+
+#define ATOM_S3_ACTIVE_CRTC2w1          0xFFF
+
+
+// BIOS_4_SCRATCH Definition
+#define ATOM_S4_LCD1_PANEL_ID_MASK      0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT      8
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0    0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1     ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
+#define ATOM_S5_DOS_REQ_CRT1b0          0x01
+#define ATOM_S5_DOS_REQ_LCD1b0          0x02
+#define ATOM_S5_DOS_REQ_TV1b0           0x04
+#define ATOM_S5_DOS_REQ_DFP1b0          0x08
+#define ATOM_S5_DOS_REQ_CRT2b0          0x10
+#define ATOM_S5_DOS_REQ_LCD2b0          0x20
+#define ATOM_S5_DOS_REQ_DFP6b0          0x40
+#define ATOM_S5_DOS_REQ_DFP2b0          0x80
+#define ATOM_S5_DOS_REQ_CVb1            0x01
+#define ATOM_S5_DOS_REQ_DFP3b1          0x02
+#define ATOM_S5_DOS_REQ_DFP4b1          0x04
+#define ATOM_S5_DOS_REQ_DFP5b1          0x08
+
+
+#define ATOM_S5_DOS_REQ_DEVICEw0        0x0FFF
+
+#define ATOM_S5_DOS_REQ_CRT1            0x0001
+#define ATOM_S5_DOS_REQ_LCD1            0x0002
+#define ATOM_S5_DOS_REQ_TV1             0x0004
+#define ATOM_S5_DOS_REQ_DFP1            0x0008
+#define ATOM_S5_DOS_REQ_CRT2            0x0010
+#define ATOM_S5_DOS_REQ_LCD2            0x0020
+#define ATOM_S5_DOS_REQ_DFP6            0x0040
+#define ATOM_S5_DOS_REQ_DFP2            0x0080
+#define ATOM_S5_DOS_REQ_CV              0x0100
+#define ATOM_S5_DOS_REQ_DFP3            0x0200
+#define ATOM_S5_DOS_REQ_DFP4            0x0400
+#define ATOM_S5_DOS_REQ_DFP5            0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2        ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2         ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2        ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3          ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1      (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+                                        (ATOM_S5_DOS_FORCE_CVb3<<8))
+// BIOS_6_SCRATCH Definition
+#define ATOM_S6_DEVICE_CHANGE           0x00000001L
+#define ATOM_S6_SCALER_CHANGE           0x00000002L
+#define ATOM_S6_LID_CHANGE              0x00000004L
+#define ATOM_S6_DOCKING_CHANGE          0x00000008L
+#define ATOM_S6_ACC_MODE                0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE        0x00000020L
+#define ATOM_S6_LID_STATE               0x00000040L
+#define ATOM_S6_DOCK_STATE              0x00000080L
+#define ATOM_S6_CRITICAL_STATE          0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE       0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE    0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS   0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL         0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO  0x00002000L //Aspect ratio expansion Request bit for LCD
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L        //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L        //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion
+
+#define ATOM_S6_ACC_REQ_CRT1            0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1            0x00020000L
+#define ATOM_S6_ACC_REQ_TV1             0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1            0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2            0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2            0x00200000L
+#define ATOM_S6_ACC_REQ_DFP6            0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2            0x00800000L
+#define ATOM_S6_ACC_REQ_CV              0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3                  0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4                  0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5                  0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK                0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE    0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH    0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S6_DEVICE_CHANGEb0         0x01
+#define ATOM_S6_SCALER_CHANGEb0         0x02
+#define ATOM_S6_LID_CHANGEb0            0x04
+#define ATOM_S6_DOCKING_CHANGEb0        0x08
+#define ATOM_S6_ACC_MODEb0              0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0      0x20
+#define ATOM_S6_LID_STATEb0             0x40
+#define ATOM_S6_DOCK_STATEb0            0x80
+#define ATOM_S6_CRITICAL_STATEb1        0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1     0x02
+#define ATOM_S6_THERMAL_STATE_CHANGEb1  0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1        0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+
+#define ATOM_S6_ACC_REQ_CRT1b2          0x01
+#define ATOM_S6_ACC_REQ_LCD1b2          0x02
+#define ATOM_S6_ACC_REQ_TV1b2           0x04
+#define ATOM_S6_ACC_REQ_DFP1b2          0x08
+#define ATOM_S6_ACC_REQ_CRT2b2          0x10
+#define ATOM_S6_ACC_REQ_LCD2b2          0x20
+#define ATOM_S6_ACC_REQ_DFP6b2          0x40
+#define ATOM_S6_ACC_REQ_DFP2b2          0x80
+#define ATOM_S6_ACC_REQ_CVb3            0x01
+#define ATOM_S6_ACC_REQ_DFP3b3          0x02
+#define ATOM_S6_ACC_REQ_DFP4b3          0x04
+#define ATOM_S6_ACC_REQ_DFP5b3          0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1        ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3    0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3    0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT             0
+#define ATOM_S6_SCALER_CHANGE_SHIFT             1
+#define ATOM_S6_LID_CHANGE_SHIFT                2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT            3
+#define ATOM_S6_ACC_MODE_SHIFT                  4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT          5
+#define ATOM_S6_LID_STATE_SHIFT                 6
+#define ATOM_S6_DOCK_STATE_SHIFT                7
+#define ATOM_S6_CRITICAL_STATE_SHIFT            8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT         9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT      10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT     11
+#define ATOM_S6_REQ_SCALER_SHIFT                12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT         13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT      14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT          15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT  28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT  29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT     30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT     31
+
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
+#define ATOM_S7_DOS_MODE_TYPEb0             0x03
+#define ATOM_S7_DOS_MODE_VGAb0              0x00
+#define ATOM_S7_DOS_MODE_VESAb0             0x01
+#define ATOM_S7_DOS_MODE_EXTb0              0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_ASIC_INIT_COMPLETEb1        0x02
+#define ATOM_S7_ASIC_INIT_COMPLETE_MASK     0x00000200
+#define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
+
+// BIOS_8_SCRATCH Definition
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK       0x00000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK     0x0FFFF0000
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT      0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT       16
+
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK  0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK    0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   16
+#endif
+
+
+#define ATOM_FLAG_SET                         0x20
+#define ATOM_FLAG_CLEAR                       0
+#define CLEAR_ATOM_S6_ACC_MODE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE                 ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE               ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE                   ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE  ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE            ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO         ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO       ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG           ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1                    ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )|  ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN           ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN         ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
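+
+// Illustrative sketch (not part of the ATOM interface): the SET_/CLEAR_ATOM_S*
+// values above pack the scratch-register definition index into bits [15:8],
+// the bit position within that register into bits [4:0], and
+// ATOM_FLAG_SET/ATOM_FLAG_CLEAR into bit 5. The decoder below, with
+// hypothetical names, unpacks such an encoded request.
+static inline void example_decode_scratch_flag(USHORT encoded, UCHAR *scratch_def,
+                                               UCHAR *bit_shift, UCHAR *is_set)
+{
+  *scratch_def = (UCHAR)(encoded >> 8);        /* e.g. ATOM_ACC_CHANGE_INFO_DEF */
+  *bit_shift   = (UCHAR)(encoded & 0x1F);      /* e.g. ATOM_S6_LID_STATE_SHIFT  */
+  *is_set      = (UCHAR)((encoded & ATOM_FLAG_SET) != 0);
+}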
+
+/****************************************************************************/
+//Portion II: Definitions only used in Driver
+/****************************************************************************/
+
+// Macros used by driver
+
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define   GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
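+
+// Illustrative usage sketch: GetIndexIntoMasterTable() turns a field name of
+// the master command/data table lists (defined earlier in this file) into a
+// USHORT index via an offsetof-style computation divided by sizeof(USHORT).
+// The wrapper function is hypothetical; ASIC_Init and FirmwareInfo are real
+// entries of those master lists.
+static inline void example_master_table_indices(int *asic_init_idx, int *firmware_info_idx)
+{
+  *asic_init_idx     = GetIndexIntoMasterTable(COMMAND, ASIC_Init);
+  *firmware_info_idx = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+}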
+
+/****************************************************************************/
+//Portion III: Definitions only used in VBIOS
+/****************************************************************************/
+#define ATOM_DAC_SRC               0x80
+#define ATOM_SRC_DAC1               0
+#define ATOM_SRC_DAC2               0x80
+
+
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+  ULONG ulTargetMemoryClock; //In 10Khz unit
+  UCHAR   ucAction;                //not defined yet
+  UCHAR   ucFbDiv_Hi;             //Fbdiv Hi byte
+  UCHAR   ucFbDiv;                //FB value
+  UCHAR   ucPostDiv;             //Post div
+}MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION  MEMORY_PLLINIT_PARAMETERS
+
+
+#define   GPIO_PIN_WRITE                                       0x01
+#define   GPIO_PIN_READ                                          0x00
+
+typedef struct  _GPIO_PIN_CONTROL_PARAMETERS
+{
+  UCHAR ucGPIO_ID;           //return value, read from GPIO pins
+  UCHAR ucGPIOBitShift;      //defines which bit in ucGPIOBitVal needs to be updated
+  UCHAR ucGPIOBitVal;        //Set/Reset the corresponding bit defined in ucGPIOBitMask
+  UCHAR ucAction;            //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
+}GPIO_PIN_CONTROL_PARAMETERS;
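+
+// Illustrative sketch (not part of the ATOM interface): one plausible way to
+// fill GPIO_PIN_CONTROL_PARAMETERS for a write request; executing the
+// corresponding command table is assumed to happen elsewhere, and the
+// function name is hypothetical.
+static inline void example_fill_gpio_pin_write(GPIO_PIN_CONTROL_PARAMETERS *p,
+                                               UCHAR gpio_id, UCHAR bit, UCHAR value)
+{
+  p->ucGPIO_ID      = gpio_id;
+  p->ucGPIOBitShift = bit;                         /* which bit to update             */
+  p->ucGPIOBitVal   = (UCHAR)((value & 1) << bit); /* set/reset the corresponding bit */
+  p->ucAction       = GPIO_PIN_WRITE;
+}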
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+  UCHAR ucScaler;            // ATOM_SCALER1, ATOM_SCALER2
+  UCHAR ucEnable;            // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+  UCHAR ucTVStandard;        //
+  UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+//ucEnable:
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION    0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION  1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE               2
+#define SCALER_ENABLE_MULTITAP_MODE                 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+  ULONG  usHWIconHorzVertPosn;        // Hardware Icon Horizontal/Vertical position
+  UCHAR  ucHWIconVertOffset;          // Hardware Icon Vertical offset
+  UCHAR  ucHWIconHorzOffset;          // Hardware Icon Horizontal offset
+  UCHAR  ucSelection;                 // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+  ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS  sEnableIcon;
+  ENABLE_CRTC_PARAMETERS                  sReserved;
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT usDeviceId;                  // Active Device Id for this surface. If no device, set to 0.
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
+{
+  USHORT usHight;                     // Image Height
+  USHORT usWidth;                     // Image Width
+  USHORT usGraphPitch;
+  UCHAR  ucColorDepth;
+  UCHAR  ucPixelFormat;
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucModeType;
+  UCHAR  ucReserved;
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
+
+// ucEnable
+#define ATOM_GRAPH_CONTROL_SET_PITCH             0x0f
+#define ATOM_GRAPH_CONTROL_SET_DISP_START        0x10
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+  ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+  ENABLE_YUV_PS_ALLOCATION        sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+  USHORT  usMemoryStart;                //on an 8Kb boundary, offset from memory base address
+  USHORT  usMemorySize;                 //aligned to 8Kb blocks
+}MEMORY_CLEAN_UP_PARAMETERS;
+
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+  USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC
+  USHORT  usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
+{
+  union{
+    USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC
+    USHORT  usSurface;
+  };
+  USHORT usY_Size;
+  USHORT usDispXStart;
+  USHORT usDispYStart;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
+
+
+typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
+{
+  UCHAR  ucLutId;
+  UCHAR  ucAction;
+  USHORT usLutStartIndex;
+  USHORT usLutLength;
+  USHORT usLutOffsetInVram;
+}PALETTE_DATA_CONTROL_PARAMETERS_V3;
+
+// ucAction:
+#define PALETTE_DATA_AUTO_FILL            1
+#define PALETTE_DATA_READ                 2
+#define PALETTE_DATA_WRITE                3
+
+
+typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
+{
+  UCHAR  ucInterruptId;
+  UCHAR  ucServiceId;
+  UCHAR  ucStatus;
+  UCHAR  ucReserved;
+}INTERRUPT_SERVICE_PARAMETER_V2;
+
+// ucInterruptId
+#define HDP1_INTERRUPT_ID                 1
+#define HDP2_INTERRUPT_ID                 2
+#define HDP3_INTERRUPT_ID                 3
+#define HDP4_INTERRUPT_ID                 4
+#define HDP5_INTERRUPT_ID                 5
+#define HDP6_INTERRUPT_ID                 6
+#define SW_INTERRUPT_ID                   11
+
+// ucAction
+#define INTERRUPT_SERVICE_GEN_SW_INT      1
+#define INTERRUPT_SERVICE_GET_STATUS      2
+
+ // ucStatus
+#define INTERRUPT_STATUS__INT_TRIGGER     1
+#define INTERRUPT_STATUS__HPD_HIGH        2
+
+typedef struct _EFUSE_INPUT_PARAMETER
+{
+  USHORT usEfuseIndex;
+  UCHAR  ucBitShift;
+  UCHAR  ucBitLength;
+}EFUSE_INPUT_PARAMETER;
+
+// ReadEfuseValue command table input/output parameter
+typedef union _READ_EFUSE_VALUE_PARAMETER
+{
+  EFUSE_INPUT_PARAMETER sEfuse;
+  ULONG                 ulEfuseValue;
+}READ_EFUSE_VALUE_PARAMETER;
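+
+// Illustrative sketch (not part of the ATOM interface): the union above is an
+// in/out parameter for the ReadEfuseValue command table; the caller fills
+// sEfuse, and the same storage holds ulEfuseValue on return. The exec
+// callback and the function name below are hypothetical.
+static inline ULONG example_read_efuse(READ_EFUSE_VALUE_PARAMETER *arg,
+                                       USHORT index, UCHAR shift, UCHAR length,
+                                       void (*exec_read_efuse_table)(READ_EFUSE_VALUE_PARAMETER *))
+{
+  arg->sEfuse.usEfuseIndex = index;
+  arg->sEfuse.ucBitShift   = shift;
+  arg->sEfuse.ucBitLength  = length;
+  exec_read_efuse_table(arg);     /* runs the ReadEfuseValue command table  */
+  return arg->ulEfuseValue;       /* output overlays the input in the union */
+}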
+
+typedef struct _INDIRECT_IO_ACCESS
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ              0x00
+#define INDIRECT_WRITE             0x80
+
+#define INDIRECT_IO_MM             0
+#define INDIRECT_IO_PLL            1
+#define INDIRECT_IO_MC             2
+#define INDIRECT_IO_PCIE           3
+#define INDIRECT_IO_PCIEP          4
+#define INDIRECT_IO_NBMISC         5
+#define INDIRECT_IO_SMU            5
+
+#define INDIRECT_IO_PLL_READ       INDIRECT_IO_PLL   | INDIRECT_READ
+#define INDIRECT_IO_PLL_WRITE      INDIRECT_IO_PLL   | INDIRECT_WRITE
+#define INDIRECT_IO_MC_READ        INDIRECT_IO_MC    | INDIRECT_READ
+#define INDIRECT_IO_MC_WRITE       INDIRECT_IO_MC    | INDIRECT_WRITE
+#define INDIRECT_IO_PCIE_READ      INDIRECT_IO_PCIE  | INDIRECT_READ
+#define INDIRECT_IO_PCIE_WRITE     INDIRECT_IO_PCIE  | INDIRECT_WRITE
+#define INDIRECT_IO_PCIEP_READ     INDIRECT_IO_PCIEP | INDIRECT_READ
+#define INDIRECT_IO_PCIEP_WRITE    INDIRECT_IO_PCIEP | INDIRECT_WRITE
+#define INDIRECT_IO_NBMISC_READ    INDIRECT_IO_NBMISC | INDIRECT_READ
+#define INDIRECT_IO_NBMISC_WRITE   INDIRECT_IO_NBMISC | INDIRECT_WRITE
+#define INDIRECT_IO_SMU_READ       INDIRECT_IO_SMU | INDIRECT_READ
+#define INDIRECT_IO_SMU_WRITE      INDIRECT_IO_SMU | INDIRECT_WRITE
+
+
+typedef struct _ATOM_OEM_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+   UCHAR   ucVMode_Num;           //Video mode number
+   UCHAR   ucTV_Mode_Num;         //Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+   USHORT   usTV_Mode_LUT_Offset;   // Pointer to standard to internal number conversion table
+   USHORT   usTV_FIFO_Offset;        // Pointer to FIFO entry table
+   USHORT   usNTSC_Tbl_Offset;      // Pointer to SDTV_Mode_NTSC table
+   USHORT   usPAL_Tbl_Offset;        // Pointer to SDTV_Mode_PAL table
+   USHORT   usCV_Tbl_Offset;        // Pointer to SDTV_Mode_PAL table
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+   USHORT   ucFilter0_Offset;      //Pointer to filter format 0 coefficients
+   USHORT   usFilter1_Offset;      //Pointer to filter format 1 coefficients
+   UCHAR   ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ATOM_DTD_FORMAT              aModeTimings[16];      // 16 is not the real array number, just for initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{
+  USHORT    usSTD_HDisp;
+  USHORT    usSTD_VDisp;
+  USHORT    usSTD_RefreshRate;
+  USHORT    usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+  USHORT  usVESA_ModeNumber;
+  USHORT  usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+   UCHAR                                    ucMemoryType;
+   UCHAR                                    ucMemoryVendor;
+   UCHAR                                    ucAdjMCId;
+   UCHAR                                    ucDynClkId;
+   ULONG                                    ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
+#if ATOM_BIG_ENDIAN
+	ULONG												ucMemBlkId:8;
+	ULONG												ulMemClockRange:24;
+#else
+	ULONG												ulMemClockRange:24;
+	ULONG												ucMemBlkId:8;
+#endif
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+  ULONG                         ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+   ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS  ulMemoryID;
+   ULONG                                 aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+    USHORT usRegIndex;                                     // MC register index
+    UCHAR  ucPreRegDataLength;                             // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+   USHORT                           usRegIndexTblSize;          //size of asRegIndexBuf
+   USHORT                           usRegDataBlkSize;           //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+   ATOM_INIT_REG_INDEX_FORMAT       asRegIndexBuf[1];
+   ATOM_MEMORY_SETTING_DATA_BLOCK   asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK  0x0ffff
+#define END_OF_REG_DATA_BLOCK   0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80               //Not used in BIOS
+#define CLOCK_RANGE_HIGHEST     0x00ffffff
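+
+// Illustrative sketch (not the driver's actual parser): asRegIndexBuf[] and
+// asRegDataBuf[] above are variable length (the [1] is a placeholder). The
+// index list occupies usRegIndexTblSize bytes and is terminated by
+// END_OF_REG_INDEX_BLOCK; the helper below (hypothetical name) counts the MC
+// register indices in one block.
+static inline ULONG example_count_mc_reg_indices(ATOM_INIT_REG_BLOCK *blk)
+{
+  ULONG max = blk->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT);
+  ULONG n = 0;
+
+  while (n < max && blk->asRegIndexBuf[n].usRegIndex != END_OF_REG_INDEX_BLOCK)
+    n++;
+  return n;
+}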
+
+#define VALUE_DWORD             sizeof(ULONG)
+#define VALUE_SAME_AS_ABOVE     0
+#define VALUE_MASK_DWORD        0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN       (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END          (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE       (INDEX_ACCESS_RANGE_END + 1)
+//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
+#define ACCESS_PLACEHOLDER             0x80
+
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usAdjustARB_SEQDataOffset;
+  USHORT                        usMCInitMemTypeTblOffset;
+  USHORT                        usMCInitCommonTblOffset;
+  USHORT                        usMCInitPowerDownTblOffset;
+  ULONG                         ulARB_SEQDataBuf[32];
+  ATOM_INIT_REG_BLOCK           asMCInitMemType;
+  ATOM_INIT_REG_BLOCK           asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
+
+typedef struct _ATOM_REG_INIT_SETTING
+{
+  USHORT  usRegIndex;
+  ULONG   ulRegValue;
+}ATOM_REG_INIT_SETTING;
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  ULONG                         ulMCUcodeVersion;
+  ULONG                         ulMCUcodeRomStartAddr;
+  ULONG                         ulMCUcodeLength;
+  USHORT                        usMcRegInitTableOffset;     // offset of ATOM_REG_INIT_SETTING array for MC core register settings.
+  USHORT                        usReserved;                 // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY register setting
+}ATOM_MC_INIT_PARAM_TABLE_V2_1;
+
+
+#define _4Mx16              0x2
+#define _4Mx32              0x3
+#define _8Mx16              0x12
+#define _8Mx32              0x13
+#define _8Mx128             0x15
+#define _16Mx16             0x22
+#define _16Mx32             0x23
+#define _16Mx128            0x25
+#define _32Mx16             0x32
+#define _32Mx32             0x33
+#define _32Mx128            0x35
+#define _64Mx32             0x43
+#define _64Mx8              0x41
+#define _64Mx16             0x42
+#define _128Mx8             0x51
+#define _128Mx16            0x52
+#define _128Mx32            0x53
+#define _256Mx8             0x61
+#define _256Mx16            0x62
+#define _512Mx8             0x71
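+
+// Illustrative sketch (not part of the ATOM interface): per the
+// ucMemoryDeviceCfg comments in the ATOM_VRAM_MODULE structures below, the
+// density code sits in bits [7:4] and the device width code in bits [3:0];
+// the _4Mx16-style defines above combine both nibbles (e.g. _64Mx32 = 0x43).
+// Helper names are hypothetical.
+static inline UCHAR example_mem_cfg_density_code(UCHAR uc_memory_device_cfg)
+{
+  return (UCHAR)((uc_memory_device_cfg >> 4) & 0xF);   /* 0x0:4M, 0x1:8M, 0x2:16M, ... */
+}
+
+static inline UCHAR example_mem_cfg_width_code(UCHAR uc_memory_device_cfg)
+{
+  return (UCHAR)(uc_memory_device_cfg & 0xF);           /* 0x0:x4, 0x1:x8, 0x2:x16, ... */
+}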
+
+
+#define SAMSUNG             0x1
+#define INFINEON            0x2
+#define ELPIDA              0x3
+#define ETRON               0x4
+#define NANYA               0x5
+#define HYNIX               0x6
+#define MOSEL               0x7
+#define WINBOND             0x8
+#define ESMT                0x9
+#define MICRON              0xF
+
+#define QIMONDA             INFINEON
+#define PROMOS              MOSEL
+#define KRETON              INFINEON
+#define ELIXIR              NANYA
+#define MEZZA               ELPIDA
+
+
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
+
+#define UCODE_ROM_START_ADDRESS      0x1b800
+#define   UCODE_SIGNATURE         0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+  ULONG  ulSignature;
+  UCHAR  ucRevision;
+  UCHAR  ucChecksum;
+  UCHAR  ucReserved1;
+  UCHAR  ucReserved2;
+  USHORT usParametersLength;
+  USHORT usUCodeLength;
+  USHORT usReserved1;
+  USHORT usReserved2;
+} MCuCodeHeader;
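+
+// Illustrative sketch (not part of the ATOM interface): a minimal validity
+// check of an MC uCode block located at UCODE_ROM_START_ADDRESS in the upper
+// 64K of ROM, using the header above. The function name is hypothetical and
+// byte-order handling is omitted.
+static inline int example_mc_ucode_signature_ok(const MCuCodeHeader *hdr)
+{
+  return hdr->ulSignature == UCODE_SIGNATURE;   /* 'MCuC' */
+}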
+
+//////////////////////////////////////////////////////////////////////////////////
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE   16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK   0xF
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+  ULONG                      ulReserved;
+  USHORT                     usEMRSValue;
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of Rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of Columns, in power of 2;
+  UCHAR                      ucBank;            // Number of Banks;
+  UCHAR                      ucRank;            // Number of Ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels;
+  UCHAR                      ucChannelConfig;   // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+  ULONG                      ulReserved;
+  ULONG                      ulFlags;              // To enable/disable functionalities based on memory type
+  ULONG                      ulEngineClock;     // Override of default engine clock for particular memory type
+  ULONG                      ulMemoryClock;     // Override of default memory clock for particular memory type
+  USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRSValue;
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of Row,in power of 2;
+  UCHAR                      ucColumn;          // Number of Column,in power of 2;
+  UCHAR                      ucBank;            // Number of Banks;
+  UCHAR                      ucRank;            // Number of Rank, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channel;
+  UCHAR                      ucChannelConfig;   // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucRefreshRateFactor;
+  UCHAR                      ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef   struct _ATOM_MEMORY_TIMING_FORMAT
+{
+   ULONG                     ulClkRange;            // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+  union{
+    USHORT                   usMRS;                 // mode register
+    USHORT                   usDDR3_MR0;
+  };
+  union{
+    USHORT                   usEMRS;                  // extended mode register
+    USHORT                   usDDR3_MR1;
+  };
+   UCHAR                     ucCL;                    // CAS latency
+   UCHAR                     ucWL;                    // WRITE Latency
+   UCHAR                     uctRAS;                  // tRAS
+   UCHAR                     uctRC;                   // tRC
+   UCHAR                     uctRFC;                  // tRFC
+   UCHAR                     uctRCDR;                 // tRCDR
+   UCHAR                     uctRCDW;                 // tRCDW
+   UCHAR                     uctRP;                   // tRP
+   UCHAR                     uctRRD;                  // tRRD
+   UCHAR                     uctWR;                   // tWR
+   UCHAR                     uctWTR;                  // tWTR
+   UCHAR                     uctPDIX;                 // tPDIX
+   UCHAR                     uctFAW;                  // tFAW
+   UCHAR                     uctAOND;                 // tAOND
+  union
+  {
+    struct {
+       UCHAR                                  ucflag;                  // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+       UCHAR                                  ucReserved;
+    };
+    USHORT                   usDDR3_MR2;
+  };
+}ATOM_MEMORY_TIMING_FORMAT;
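Since ulClkRange is in 10 kHz units and the timing blocks are sorted from lower to higher clock, picking a timing set amounts to taking the first entry whose range covers the target clock. A minimal sketch; the helper name and count parameter are illustrative (the tables in this file carry five entries):

static const ATOM_MEMORY_TIMING_FORMAT *
pick_mem_timing(const ATOM_MEMORY_TIMING_FORMAT *timings, int count,
                unsigned long target_clk_10khz)
{
    int i;

    for (i = 0; i < count; i++) {
        /* use this timing when the target clock is below its range limit */
        if (target_clk_10khz <= timings[i].ulClkRange)
            return &timings[i];
    }
    return NULL;    /* target clock above every entry */
}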
+
+
+typedef   struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+   ULONG                      ulClkRange;            // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+   USHORT                     usMRS;                 // mode register
+   USHORT                     usEMRS;                // extended mode register
+   UCHAR                      ucCL;                  // CAS latency
+   UCHAR                      ucWL;                  // WRITE Latency
+   UCHAR                      uctRAS;                // tRAS
+   UCHAR                      uctRC;                 // tRC
+   UCHAR                      uctRFC;                // tRFC
+   UCHAR                      uctRCDR;               // tRCDR
+   UCHAR                      uctRCDW;               // tRCDW
+   UCHAR                      uctRP;                 // tRP
+   UCHAR                      uctRRD;                // tRRD
+   UCHAR                      uctWR;                 // tWR
+   UCHAR                      uctWTR;                // tWTR
+   UCHAR                      uctPDIX;               // tPDIX
+   UCHAR                      uctFAW;                // tFAW
+   UCHAR                      uctAOND;               // tAOND
+   UCHAR                      ucflag;                // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+   UCHAR                      uctCCDL;               //
+   UCHAR                      uctCRCRL;              //
+   UCHAR                      uctCRCWL;              //
+   UCHAR                      uctCKE;                //
+   UCHAR                      uctCKRSE;              //
+   UCHAR                      uctCKRSX;              //
+   UCHAR                      uctFAW32;              //
+   UCHAR                      ucMR5lo;               //
+   UCHAR                      ucMR5hi;               //
+   UCHAR                      ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+
+
+
+typedef   struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+   ULONG                                  ulClkRange;            // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+   USHORT                               usMRS;                     // mode register
+   USHORT                               usEMRS;                  // extended mode register
+   UCHAR                                  ucCL;                     // CAS latency
+   UCHAR                                  ucWL;                     // WRITE Latency
+   UCHAR                                  uctRAS;                  // tRAS
+   UCHAR                                  uctRC;                     // tRC
+   UCHAR                                  uctRFC;                  // tRFC
+   UCHAR                                  uctRCDR;                  // tRCDR
+   UCHAR                                  uctRCDW;                  // tRCDW
+   UCHAR                                  uctRP;                     // tRP
+   UCHAR                                  uctRRD;                  // tRRD
+   UCHAR                                  uctWR;                     // tWR
+   UCHAR                                  uctWTR;                  // tWTR
+   UCHAR                                  uctPDIX;                  // tPDIX
+   UCHAR                                  uctFAW;                  // tFAW
+   UCHAR                                  uctAOND;                  // tAOND
+   UCHAR                                  ucflag;                  // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+   UCHAR                                  uctCCDL;                  //
+   UCHAR                                  uctCRCRL;                  //
+   UCHAR                                  uctCRCWL;                  //
+   UCHAR                                  uctCKE;                  //
+   UCHAR                                  uctCKRSE;                  //
+   UCHAR                                  uctCKRSX;                  //
+   UCHAR                                  uctFAW32;                  //
+   UCHAR                                  ucMR4lo;               //
+   UCHAR                                  ucMR4hi;               //
+   UCHAR                                  ucMR5lo;               //
+   UCHAR                                  ucMR5hi;               //
+   UCHAR                                  ucTerminator;
+   UCHAR                                  ucReserved;
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+
+typedef   struct _ATOM_MEMORY_FORMAT
+{
+   ULONG                       ulDllDisClock;     // memory DLL will be disable when target memory clock is below this clock
+  union{
+    USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_Reserved;   // Not used for DDR3 memory
+  };
+  union{
+    USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_MR3;        // Used for DDR3 memory
+  };
+  UCHAR                        ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                        ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+  UCHAR                        ucRow;             // Number of Row,in power of 2;
+  UCHAR                        ucColumn;          // Number of Column,in power of 2;
+  UCHAR                        ucBank;            // Number of Banks;
+  UCHAR                        ucRank;            // Number of Rank, in power of 2
+  UCHAR                        ucBurstSize;           // burst size, 0= burst size=4  1= burst size=8
+  UCHAR                        ucDllDisBit;           // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+  UCHAR                        ucRefreshRateFactor;   // memory refresh rate in unit of ms
+  UCHAR                        ucDensity;             // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR                        ucPreamble;            // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR                        ucMemAttrib;           // Memory Device Attribute, like RDBI/WDBI etc.
+  ATOM_MEMORY_TIMING_FORMAT    asMemTiming[5];        // Memory Timing block sort from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+  ULONG                      ulChannelMapCfg;     // board dependent parameter: Channel combination
+  USHORT                     usSize;              // size of ATOM_VRAM_MODULE_V3
+  USHORT                     usDefaultMVDDQ;      // board dependent parameter:Default Memory Core Voltage
+  USHORT                     usDefaultMVDDC;      // board dependent parameter:Default Memory IO Voltage
+  UCHAR                      ucExtMemoryID;       // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucChannelNum;        // board dependent parameter:Number of channel;
+  UCHAR                      ucChannelSize;       // board dependent parameter:32bit or 64bit
+  UCHAR                      ucVREFI;             // board dependent parameter: EXT or INT +160mv to -140mv
+  UCHAR                      ucNPL_RT;            // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR                      ucFlag;              // To enable/disable functionalities based on memory type
+  ATOM_MEMORY_FORMAT         asMemory;            // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
+#define NPL_RT_MASK                                         0x0f
+#define BATTERY_ODT_MASK                                    0xc0
+
+#define ATOM_VRAM_MODULE       ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+  ULONG     ulChannelMapCfg;                   // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;                      // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+   UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+   UCHAR     ucFlag;                                  // To enable/disable functionalities based on memory type
+   UCHAR     ucMisc;                                  // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR      ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR      ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
+  union{
+    USHORT   usEMRS2Value;                   // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_Reserved;
+  };
+  union{
+    USHORT   usEMRS3Value;                   // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_MR3;                     // Used for DDR3 memory
+  };
+  UCHAR   ucMemoryVenderID;                    // Predefined, If not predefined, vendor detection table gets executed
+  UCHAR     ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR   ucReserved2[2];
+  ATOM_MEMORY_TIMING_FORMAT  asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK         0x4
+#define VRAM_MODULE_V4_MISC_BL8             0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10
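Sketch of decoding ucMisc with the masks above (dual-rank bit and burst length), following the bit assignments documented in the V4 structure; the helper name is illustrative:

static void decode_v4_misc(const ATOM_VRAM_MODULE_V4 *mod,
                           int *dual_rank, int *burst_len)
{
    *dual_rank = (mod->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) ==
                 VRAM_MODULE_V4_MISC_DUAL_RANK;
    *burst_len = (mod->ucMisc & VRAM_MODULE_V4_MISC_BL_MASK) ==
                 VRAM_MODULE_V4_MISC_BL8 ? 8 : 4;
}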
+
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+  ULONG     ulChannelMapCfg;                   // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;                      // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+   UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+   UCHAR     ucFlag;                                  // To enable/disable functionalities based on memory type
+   UCHAR     ucMisc;                                  // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR      ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR      ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
+  USHORT   usEMRS2Value;                        // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT   usEMRS3Value;                        // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;                    // Predefined, If not predefined, vendor detection table gets executed
+  UCHAR     ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR     ucFIFODepth;                         // FIFO depth is supposed to be detected during vendor detection, but if vendor detection is not done the FIFO depth has to be hardcoded
+  UCHAR   ucCDR_Bandwidth;         // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V1  asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+  ULONG     ulChannelMapCfg;                   // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;                      // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+   UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+   UCHAR     ucFlag;                                  // To enable/disable functionalities based on memory type
+   UCHAR     ucMisc;                                  // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR      ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR      ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
+  USHORT   usEMRS2Value;                        // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT   usEMRS3Value;                        // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;                    // Predefined, If not predefined, vendor detection table gets executed
+  UCHAR     ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR     ucFIFODepth;                         // FIFO depth is supposed to be detected during vendor detection, but if vendor detection is not done the FIFO depth has to be hardcoded
+  UCHAR   ucCDR_Bandwidth;         // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+  ULONG   ulChannelMapCfg;                   // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usEnableChannels;                 // bit vector which indicate which channels are enabled
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR   ucReserve;                        // In MC7x, the lower 4 bits are used as bit8-11 of memory size. In other MC code, it's not used.
+  UCHAR   ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR   ucVREFI;                          // Not used.
+  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+  UCHAR   ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  USHORT  usSEQSettingOffset;
+  UCHAR   ucReserved;
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value.
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR   ucFIFODepth;                      // FIFO depth could be detected during vendor detection; here it is hardcoded per memory module
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  char    strMemPNString[20];               // part number end with '0'.
+}ATOM_VRAM_MODULE_V7;
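ucMemorySize is in 16 MB units, and the ucReserve comment says that on MC7x parts its low nibble supplies bits 8..11 of the size. A hedged sketch of the resulting computation; the helper name is illustrative and whether the extension applies is asic-dependent:

static unsigned int v7_vram_size_mb(const ATOM_VRAM_MODULE_V7 *mod)
{
    unsigned int units_16mb = mod->ucMemorySize |
                              ((unsigned int)(mod->ucReserve & 0x0f) << 8);

    return units_16mb * 16;    /* total size in MB */
}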
+
+
+typedef struct _ATOM_VRAM_MODULE_V8
+{
+// Design Specific Values
+  ULONG   ulChannelMapCfg;                  // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usMcRamCfg;                       // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usEnableChannels;                 // bit vector which indicate which channels are enabled
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR   ucBankCol;                        // bit[3:2]= BANK ( =2:16bank, =1:8bank, =0:4bank ) bit[1:0]=Col ( =2: 10 bit, =1:9bit, =0:8bit )
+  UCHAR   ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR   ucVREFI;                          // Not used.
+  USHORT  usReserved;                       // Not used
+  USHORT  usMemorySize;                     // Total memory size in unit of MB for CONFIG_MEMSIZE zeros
+  UCHAR   ucMcTunningSetId;                 // MC phy registers set per.
+  UCHAR   ucRowNum;
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value.
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR   ucFIFODepth;                      // FIFO depth could be detected during vendor detection; here it is hardcoded per memory module
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+
+  ULONG   ulChannelMapCfg1;                 // channel mapping for channel8~15
+  ULONG   ulBankMapCfg;
+  ULONG   ulReserved;
+  char    strMemPNString[20];               // part number end with '0'.
+}ATOM_VRAM_MODULE_V8;
+
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE           aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER  sHeader;
+  USHORT                    usMemAdjustTblOffset;                            // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                    usMemClkPatchTblOffset;                          // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                    usRerseved;
+  UCHAR                     aVID_PinsShift[9];                               // 8 bit strap maximum+terminator
+  UCHAR                     ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE          aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];       // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+  ATOM_INIT_REG_BLOCK       asMemPatch;                                      // for allocation
+
+}ATOM_VRAM_INFO_V3;
+
+#define   ATOM_VRAM_INFO_LAST        ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;                           // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;                         // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usRerseved;
+  UCHAR                      ucMemDQ7_0ByteRemap;                            // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+  ULONG                      ulMemDQ7_0BitRemap;                             // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+  UCHAR                      ucReservde[4];
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE_V4        aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+  ATOM_INIT_REG_BLOCK        asMemPatch;                                     // for allocation
+}ATOM_VRAM_INFO_V4;
+
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;                           // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;                         // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usPerBytePresetOffset;                          // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+  USHORT                     usReserved[3];
+  UCHAR                      ucNumOfVRAMModule;                              // indicate number of VRAM module
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicates the ATOM_VRAM_MODULE version
+  UCHAR                      ucReserved;
+  ATOM_VRAM_MODULE_V7        aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;                           // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;                         // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usMcAdjustPerTileTblOffset;                     // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+  USHORT                     usMcPhyInitTableOffset;                         // offset of ATOM_INIT_REG_BLOCK structure for MC phy init set
+  USHORT                     usDramDataRemapTblOffset;                       // offset of ATOM_DRAM_DATA_REMAP array to indicate DRAM data lane to GPU mapping
+  USHORT                     usReserved1;
+  UCHAR                      ucNumOfVRAMModule;                              // indicate number of VRAM module
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicates the ATOM_VRAM_MODULE version
+  UCHAR                      ucMcPhyTileNum;                                 // indicate the MCD tile number which use in DramDataRemapTbl and usMcAdjustPerTileTblOffset
+  ATOM_VRAM_MODULE_V8        aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_2;
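aVramInfo[] is declared with a fixed bound only for allocation; the real count is ucNumOfVRAMModule and each entry is stepped by its own usModuleSize. A sketch of how a parser might index into the list; the helper name is illustrative and endianness handling is omitted:

static const ATOM_VRAM_MODULE_V8 *
vram_module_v2_2(const ATOM_VRAM_INFO_HEADER_V2_2 *info, int index)
{
    const unsigned char *p = (const unsigned char *)info->aVramInfo;
    int i;

    if (index >= info->ucNumOfVRAMModule)
        return NULL;

    for (i = 0; i < index; i++) {
        const ATOM_VRAM_MODULE_V8 *mod = (const ATOM_VRAM_MODULE_V8 *)p;

        p += mod->usModuleSize;    /* entries are variable sized */
    }
    return (const ATOM_VRAM_MODULE_V8 *)p;
}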
+
+
+typedef struct _ATOM_DRAM_DATA_REMAP
+{
+  UCHAR ucByteRemapCh0;
+  UCHAR ucByteRemapCh1;
+  ULONG ulByte0BitRemapCh0;
+  ULONG ulByte1BitRemapCh0;
+  ULONG ulByte2BitRemapCh0;
+  ULONG ulByte3BitRemapCh0;
+  ULONG ulByte0BitRemapCh1;
+  ULONG ulByte1BitRemapCh1;
+  ULONG ulByte2BitRemapCh1;
+  ULONG ulByte3BitRemapCh1;
+}ATOM_DRAM_DATA_REMAP;
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR                      aVID_PinsShift[9];                              // 8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+   ATOM_COMMON_TABLE_HEADER   sHeader;
+   UCHAR                                  ucTrainingLoop;
+   UCHAR                                  ucReserved[3];
+   ATOM_INIT_REG_BLOCK             asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+  UCHAR    ucControl;
+  UCHAR    ucData;
+  UCHAR    ucSatus;
+  UCHAR    ucTemp;
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{
+  USHORT   GPIO_Info;
+  UCHAR    ucAct;
+  UCHAR    ucData;
+ } SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET       0
+#define SW_I2C_IO_GET         1
+#define SW_I2C_IO_DRIVE       2
+#define SW_I2C_IO_SET         3
+#define SW_I2C_IO_START       4
+
+#define SW_I2C_IO_CLOCK       0
+#define SW_I2C_IO_DATA        0x80
+
+#define SW_I2C_IO_ZERO        0
+#define SW_I2C_IO_ONE         0x100
+
+#define SW_I2C_CNTL_READ      0
+#define SW_I2C_CNTL_WRITE     1
+#define SW_I2C_CNTL_START     2
+#define SW_I2C_CNTL_STOP      3
+#define SW_I2C_CNTL_OPEN      4
+#define SW_I2C_CNTL_CLOSE     5
+#define SW_I2C_CNTL_WRITE1BIT 6
+
+//==============================VESA definition Portion===============================
+#define VESA_OEM_PRODUCT_REV                     '01.00'
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT        0xBB   //refer to VBE spec p.32, no TTY support
+#define VESA_MODE_WIN_ATTRIBUTE                       7
+#define VESA_WIN_SIZE                                      64
+
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+   USHORT   Offset16;
+   USHORT   Segment16;
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION
+{
+   PTR_32_BIT_STRUCTURE   SegmentOffset;
+   ULONG                       Ptr32_Bit;
+} PTR_32_BIT_UNION;
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+   UCHAR                  VbeSignature[4];
+   USHORT                VbeVersion;
+   PTR_32_BIT_UNION   OemStringPtr;
+   UCHAR                  Capabilities[4];
+   PTR_32_BIT_UNION   VideoModePtr;
+   USHORT                TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+   VBE_1_2_INFO_BLOCK_UPDATABLE   CommonBlock;
+   USHORT                         OemSoftRev;
+   PTR_32_BIT_UNION            OemVendorNamePtr;
+   PTR_32_BIT_UNION            OemProductNamePtr;
+   PTR_32_BIT_UNION            OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION
+{
+   VBE_2_0_INFO_BLOCK_UPDATABLE   VBE_2_0_InfoBlock;
+   VBE_1_2_INFO_BLOCK_UPDATABLE   VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK
+{
+   VBE_VERSION_UNION         UpdatableVBE_Info;
+   UCHAR                        Reserved[222];
+   UCHAR                        OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO
+{
+  USHORT   HSize;
+  USHORT   VSize;
+  USHORT   FPType;
+  UCHAR    RedBPP;
+  UCHAR    GreenBPP;
+  UCHAR    BlueBPP;
+  UCHAR    ReservedBPP;
+  ULONG    RsvdOffScrnMemSize;
+  ULONG    RsvdOffScrnMEmPtr;
+  UCHAR    Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+  USHORT   ModeAttributes;  //         dw   ?   ; mode attributes
+  UCHAR    WinAAttributes;  //         db   ?   ; window A attributes
+  UCHAR    WinBAttributes;  //         db   ?   ; window B attributes
+  USHORT   WinGranularity;  //         dw   ?   ; window granularity
+  USHORT   WinSize;         //         dw   ?   ; window size
+  USHORT   WinASegment;     //         dw   ?   ; window A start segment
+  USHORT   WinBSegment;     //         dw   ?   ; window B start segment
+  ULONG    WinFuncPtr;      //         dd   ?   ; real mode pointer to window function
+  USHORT   BytesPerScanLine;//         dw   ?   ; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+  USHORT   XResolution;      //         dw   ?   ; horizontal resolution in pixels or characters
+  USHORT   YResolution;      //         dw   ?   ; vertical resolution in pixels or characters
+  UCHAR    XCharSize;        //         db   ?   ; character cell width in pixels
+  UCHAR    YCharSize;        //         db   ?   ; character cell height in pixels
+  UCHAR    NumberOfPlanes;   //         db   ?   ; number of memory planes
+  UCHAR    BitsPerPixel;     //         db   ?   ; bits per pixel
+  UCHAR    NumberOfBanks;    //         db   ?   ; number of banks
+  UCHAR    MemoryModel;      //         db   ?   ; memory model type
+  UCHAR    BankSize;         //         db   ?   ; bank size in KB
+  UCHAR    NumberOfImagePages;//        db   ?   ; number of images
+  UCHAR    ReservedForPageFunction;//db   1   ; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+  UCHAR    RedMaskSize;        //      db   ?   ; size of direct color red mask in bits
+  UCHAR    RedFieldPosition;   //      db   ?   ; bit position of lsb of red mask
+  UCHAR    GreenMaskSize;      //      db   ?   ; size of direct color green mask in bits
+  UCHAR    GreenFieldPosition; //      db   ?   ; bit position of lsb of green mask
+  UCHAR    BlueMaskSize;       //      db   ?   ; size of direct color blue mask in bits
+  UCHAR    BlueFieldPosition;  //      db   ?   ; bit position of lsb of blue mask
+  UCHAR    RsvdMaskSize;       //      db   ?   ; size of direct color reserved mask in bits
+  UCHAR    RsvdFieldPosition;  //      db   ?   ; bit position of lsb of reserved mask
+  UCHAR    DirectColorModeInfo;//      db   ?   ; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+  ULONG    PhysBasePtr;        //      dd   ?   ; physical address for flat memory frame buffer
+  ULONG    Reserved_1;         //      dd   0   ; reserved - always set to 0
+  USHORT   Reserved_2;         //     dw   0   ; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+  USHORT   LinBytesPerScanLine;  //   dw   ?   ; bytes per scan line for linear modes
+  UCHAR    BnkNumberOfImagePages;//   db   ?   ; number of images for banked modes
+  UCHAR    LinNumberOfImagPages; //   db   ?   ; number of images for linear modes
+  UCHAR    LinRedMaskSize;       //   db   ?   ; size of direct color red mask(linear modes)
+  UCHAR    LinRedFieldPosition;  //   db   ?   ; bit position of lsb of red mask(linear modes)
+  UCHAR    LinGreenMaskSize;     //   db   ?   ; size of direct color green mask(linear modes)
+  UCHAR    LinGreenFieldPosition;//   db   ?   ; bit position of lsb of green mask(linear modes)
+  UCHAR    LinBlueMaskSize;      //   db   ?   ; size of direct color blue mask(linear modes)
+  UCHAR    LinBlueFieldPosition; //   db   ?   ; bit position of lsb of blue mask(linear modes)
+  UCHAR    LinRsvdMaskSize;      //   db   ?   ; size of direct color reserved mask(linear modes)
+  UCHAR    LinRsvdFieldPosition; //   db   ?   ; bit position of lsb of reserved mask(linear modes)
+  ULONG    MaxPixelClock;        //   dd   ?   ; maximum pixel clock(in Hz) for graphics mode
+  UCHAR    Reserved;             //   db   190 dup (0)
+} VESA_MODE_INFO_BLOCK;
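For VBE 3.0 linear modes, the frame buffer footprint follows directly from the fields above; a trivial sketch with an illustrative helper name:

static unsigned long vbe_linear_fb_bytes(const VESA_MODE_INFO_BLOCK *mi)
{
    /* bytes per scan line (linear modes) times vertical resolution */
    return (unsigned long)mi->LinBytesPerScanLine * mi->YResolution;
}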
+
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0           // ATI Extended Function code
+#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD              0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET           0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH        0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A
+#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000          // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100          // Sub function 80
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300          // Sub function 03
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700          // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400          // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300          // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500          // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400          // Notify caller that ADC is supported
+
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10          // Set DPMS
+#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001          // BL: Sub function 01
+#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002          // BL: Sub function 02
+#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000          // BH Parameter for DPMS ON.
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100          // BH Parameter for DPMS STANDBY
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200          // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400          // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800          // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
+
+#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL
+
+// structure used for VBIOS only
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
+   USHORT usTransmitterObjId;
+   USHORT usSupportDevice;
+  UCHAR  ucTransmitterCmdTblId;
+   UCHAR  ucConfig;
+   UCHAR  ucEncoderID;                //available 1st encoder ( default )
+   UCHAR  ucOptionEncoderID;    //available 2nd encoder ( optional )
+   UCHAR  uc2ndEncoderID;
+   UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
+
+typedef struct _ASIC_ENCODER_INFO
+{
+   UCHAR ucEncoderID;
+   UCHAR ucEncoderConfig;
+  USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+   USHORT ptrTransmitterInfo;
+   USHORT ptrEncoderInfo;
+   ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+   ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
+
+
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+   USHORT ptrTransmitterInfo;
+   USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary.
+   ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+   ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
+
+
+typedef struct _ATOM_DISP_CLOCK_ID {
+  UCHAR ucPpllId;
+  UCHAR ucPpllAttribute;
+}ATOM_DISP_CLOCK_ID;
+
+// ucPpllAttribute
+#define CLOCK_SOURCE_SHAREABLE            0x01
+#define CLOCK_SOURCE_DP_MODE              0x02
+#define CLOCK_SOURCE_NONE_DP_MODE         0x04
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO_V2
+{
+   USHORT usTransmitterObjId;
+   USHORT usDispClkIdOffset;    // point to clock source id list supported by Encoder Object
+  UCHAR  ucTransmitterCmdTblId;
+   UCHAR  ucConfig;
+   UCHAR  ucEncoderID;                // available 1st encoder ( default )
+   UCHAR  ucOptionEncoderID;    // available 2nd encoder ( optional )
+   UCHAR  uc2ndEncoderID;
+   UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO_V2;
+
+typedef struct _ATOM_DISP_OUT_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT ptrTransmitterInfo;
+  USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary.
+  USHORT usReserved;
+  UCHAR  ucDCERevision;
+  UCHAR  ucMaxDispEngineNum;
+  UCHAR  ucMaxActiveDispEngineNum;
+  UCHAR  ucMaxPPLLNum;
+  UCHAR  ucCoreRefClkSource;                    // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucDispCaps;
+  UCHAR  ucReserved[2];
+  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+//ucDispCaps
+#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL        0x01
+#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED  0x02
+
+typedef enum CORE_REF_CLK_SOURCE{
+  CLOCK_SRC_XTALIN=0,
+  CLOCK_SRC_XO_IN=1,
+  CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+   USHORT asDevicePriority[16];
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+   USHORT  lpAuxRequest;
+   USHORT  lpDataOut;
+   UCHAR   ucChannelID;
+   union
+   {
+  UCHAR   ucReplyStatus;
+   UCHAR   ucDelay;
+   };
+  UCHAR   ucDataOutLen;
+   UCHAR   ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+   USHORT   lpAuxRequest;
+   USHORT  lpDataOut;
+   UCHAR      ucChannelID;
+   union
+   {
+  UCHAR   ucReplyStatus;
+   UCHAR   ucDelay;
+   };
+  UCHAR   ucDataOutLen;
+   UCHAR   ucHPD_ID;                                       //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION         PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+//GetSinkType
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
+   USHORT ucLinkClock;
+   union
+   {
+   UCHAR ucConfig;            // for DP training command
+   UCHAR ucI2cId;            // use for GET_SINK_TYPE command
+   };
+   UCHAR ucAction;
+   UCHAR ucStatus;
+   UCHAR ucLaneNum;
+   UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS;
+
+// ucAction
+#define ATOM_DP_ACTION_GET_SINK_TYPE                     0x01
+
+#define DP_ENCODER_SERVICE_PS_ALLOCATION            WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+   USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;          // Input and output parameters.
+  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+   UCHAR  ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE                     0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION             0x02
+
+
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR            ATOM_DP_TRAINING_TBL_ADDR
+#define DPCD_SET_SS_CNTL_TBL_ADDR                                       (ATOM_DP_TRAINING_TBL_ADDR + 8 )
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR                     (ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR                        (ATOM_DP_TRAINING_TBL_ADDR + 24 )
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR                        (ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR                     (ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define   DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR                     (ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR                        (ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR                                 (ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR                        (ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR                                 (ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR                 (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR                           (ATOM_DP_TRAINING_TBL_ADDR + 84)
+
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+   UCHAR   ucI2CSpeed;
+    union
+   {
+   UCHAR ucRegIndex;
+   UCHAR ucStatus;
+   };
+   USHORT  lpI2CDataOut;
+  UCHAR   ucFlag;
+  UCHAR   ucTransBytes;
+  UCHAR   ucSlaveAddr;
+  UCHAR   ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION       PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+//ucFlag
+#define HW_I2C_WRITE        1
+#define HW_I2C_READ         0
+#define I2C_2BYTE_ADDR      0x02
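A hedged example of filling the parameter block above for a hardware I2C read. The command table that consumes it is not part of this header, so the speed value, the addressing convention, and the helper name are assumptions:

static void fill_hw_i2c_read(PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS *p,
                             UCHAR line, UCHAR slave_addr, UCHAR reg,
                             UCHAR nbytes, USHORT data_out)
{
    p->ucI2CSpeed   = 50;           /* assumption: engine-specific speed code */
    p->ucRegIndex   = reg;          /* register/offset inside the slave */
    p->lpI2CDataOut = data_out;     /* VBIOS-relative pointer to the buffer */
    p->ucFlag       = HW_I2C_READ;
    p->ucTransBytes = nbytes;
    p->ucSlaveAddr  = slave_addr;
    p->ucLineNumber = line;
}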
+
+/****************************************************************************/
+// Structures used by HW_Misc_OperationTable
+/****************************************************************************/
+typedef struct  _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
+{
+  UCHAR  ucCmd;                //  Input: To tell which action to take
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
+
+typedef struct  _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
+{
+  UCHAR  ucReturnCode;        // Output: Return value base on action was taken
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
+
+// Actions code
+#define  ATOM_GET_SDI_SUPPORT              0xF0
+
+// Return code
+#define  ATOM_UNKNOWN_CMD                   0
+#define  ATOM_FEATURE_NOT_SUPPORTED         1
+#define  ATOM_FEATURE_SUPPORTED             2
+
+typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
+{
+   ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1        sInput_Output;
+   PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS         sReserved;
+}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
+
+/****************************************************************************/
+
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+   UCHAR ucHWBlkInst;                // HW block instance, 0, 1, 2, ...
+   UCHAR ucReserved[3];
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK       0x07
+#define HWBLKINST_HWBLK_MASK          0xF0
+#define HWBLKINST_HWBLK_SHIFT         0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE            0
+#define SELECT_DISP_PLL               1
+#define SELECT_DCIO_UNIPHY_LINK0      2
+#define SELECT_DCIO_UNIPHY_LINK1      3
+#define SELECT_DCIO_IMPCAL            4
+#define SELECT_DCIO_DIG               6
+#define SELECT_CRTC_PIXEL_RATE        7
+#define SELECT_VGA_BLK                8
+
+// DIGTransmitterInfoTable structure used to program UNIPHY settings
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_1;
+
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+  USHORT usDPSSRegListOffset;            // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info
+  USHORT usDPSSSettingOffset;            // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_2;
+
+
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_3{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
+  USHORT usDPSSRegListOffset;            // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info
+  USHORT usDPSSSettingOffset;            // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
+  USHORT usEDPVsLegacyModeOffset;        // offset of PHY_ANALOG_SETTING_INFO * with eDP Legacy Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsLowVdiffModeOffset;      // offset of PHY_ANALOG_SETTING_INFO * with eDP Low VDiff Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsHighVdiffModeOffset;     // offset of PHY_ANALOG_SETTING_INFO * with eDP High VDiff Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsStretchModeOffset;       // offset of PHY_ANALOG_SETTING_INFO * with eDP Stretch Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsSingleVdiffModeOffset;   // offset of PHY_ANALOG_SETTING_INFO * with eDP Single Vdiff Mode Voltage Swing and Pre-Emphasis for each Link clock
+  USHORT useDPVsVariablePremModeOffset;  // offset of PHY_ANALOG_SETTING_INFO * with eDP Single Vidff+Variable PreEmphasis Voltage Swing and Pre-Emphasis for each Link clock
+}DIG_TRANSMITTER_INFO_HEADER_V3_3;
+
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+  USHORT usRegisterIndex;
+  UCHAR  ucStartBit;
+  UCHAR  ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+  USHORT usMaxClockFreq;
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  ULONG  ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+  USHORT usEntrySize;
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+  ULONG  ulCondition;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_VAL_V2{
+  ULONG  ulCondition;
+  UCHAR  ucCondition2;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL_V2;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_CONDITION_REG_INFO_V2{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL_V2 asRegVal[1];
+}PHY_CONDITION_REG_INFO_V2;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO_V2{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO_V2  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO_V2;
+
+
+typedef struct _GFX_HAVESTING_PARAMETERS {
+  UCHAR ucGfxBlkId;                        //GFX blk id to be harvested, like CU, RB or PRIM
+  UCHAR ucReserved;                        //reserved
+  UCHAR ucActiveUnitNumPerSH;              //requested active CU/RB/PRIM number per shader array
+  UCHAR ucMaxUnitNumPerSH;                 //max CU/RB/PRIM number per shader array
+} GFX_HAVESTING_PARAMETERS;
+
+//ucGfxBlkId
+#define GFX_HARVESTING_CU_ID               0
+#define GFX_HARVESTING_RB_ID               1
+#define GFX_HARVESTING_PRIM_ID             2
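Minimal sketch of filling the harvesting request above for CU harvesting; the command table that consumes GFX_HAVESTING_PARAMETERS lives outside this header, so the call site and helper name are illustrative only:

static void request_cu_harvest(GFX_HAVESTING_PARAMETERS *p,
                               UCHAR active_per_sh, UCHAR max_per_sh)
{
    p->ucGfxBlkId           = GFX_HARVESTING_CU_ID;
    p->ucReserved           = 0;
    p->ucActiveUnitNumPerSH = active_per_sh;   /* requested active CUs per SH */
    p->ucMaxUnitNumPerSH    = max_per_sh;      /* physical CUs per SH */
}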
+
+
+typedef struct _VBIOS_ROM_HEADER{
+  UCHAR  PciRomSignature[2];
+  UCHAR  ucPciRomSizeIn512bytes;
+  UCHAR  ucJumpCoreMainInitBIOS;
+  USHORT usLabelCoreMainInitBIOS;
+  UCHAR  PciReservedSpace[18];
+  USHORT usPciDataStructureOffset;
+  UCHAR  Rsvd1d_1a[4];
+  char   strIbm[3];
+  UCHAR  CheckSum[14];
+  UCHAR  ucBiosMsgNumber;
+  char   str761295520[16];
+  USHORT usLabelCoreVPOSTNoMode;
+  USHORT usSpecialPostOffset;
+  UCHAR  ucSpeicalPostImageSizeIn512Bytes;
+  UCHAR  Rsved47_45[3];
+  USHORT usROM_HeaderInformationTableOffset;
+  UCHAR  Rsved4f_4a[6];
+  char   strBuildTimeStamp[20];
+  UCHAR  ucJumpCoreXFuncFarHandler;
+  USHORT usCoreXFuncFarHandlerOffset;
+  UCHAR  ucRsved67;
+  UCHAR  ucJumpCoreVFuncFarHandler;
+  USHORT usCoreVFuncFarHandlerOffset;
+  UCHAR  Rsved6d_6b[3];
+  USHORT usATOM_BIOS_MESSAGE_Offset;
+}VBIOS_ROM_HEADER;
+
+/****************************************************************************/
+//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
+/****************************************************************************/
+
+#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
+#define MC_MISC0__MEMORY_TYPE__HBM    0x60000000
+#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000
+
+#define ATOM_MEM_TYPE_DDR_STRING      "DDR"
+#define ATOM_MEM_TYPE_DDR2_STRING     "DDR2"
+#define ATOM_MEM_TYPE_GDDR3_STRING    "GDDR3"
+#define ATOM_MEM_TYPE_GDDR4_STRING    "GDDR4"
+#define ATOM_MEM_TYPE_GDDR5_STRING    "GDDR5"
+#define ATOM_MEM_TYPE_HBM_STRING      "HBM"
+#define ATOM_MEM_TYPE_DDR3_STRING     "DDR3"
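
A small sketch of mapping the MC_MISC0 memory-type field onto the strings above (illustrative helper; the GDDR1 case falling back to the plain DDR string is an assumption):

static const char *mc_misc0_mem_type_name(ULONG mc_misc0)
{
	switch (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) {
	case MC_MISC0__MEMORY_TYPE__GDDR1: return ATOM_MEM_TYPE_DDR_STRING;   /* assumption */
	case MC_MISC0__MEMORY_TYPE__DDR2:  return ATOM_MEM_TYPE_DDR2_STRING;
	case MC_MISC0__MEMORY_TYPE__GDDR3: return ATOM_MEM_TYPE_GDDR3_STRING;
	case MC_MISC0__MEMORY_TYPE__GDDR4: return ATOM_MEM_TYPE_GDDR4_STRING;
	case MC_MISC0__MEMORY_TYPE__GDDR5: return ATOM_MEM_TYPE_GDDR5_STRING;
	case MC_MISC0__MEMORY_TYPE__HBM:   return ATOM_MEM_TYPE_HBM_STRING;
	case MC_MISC0__MEMORY_TYPE__DDR3:  return ATOM_MEM_TYPE_DDR3_STRING;
	default:                           return "unknown";
	}
}
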
+
+/****************************************************************************/
+//Portion VII: Definitions that are obsolete
+/****************************************************************************/
+
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT                   usMaxFrequency;      // in 10kHz unit
+  USHORT                   usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct  _COMPASSIONATE_DATA
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+
+  //==============================  DAC1 portion
+  UCHAR   ucDAC1_BG_Adjustment;
+  UCHAR   ucDAC1_DAC_Adjustment;
+  USHORT  usDAC1_FORCE_Data;
+  //==============================  DAC2 portion
+  UCHAR   ucDAC2_CRT2_BG_Adjustment;
+  UCHAR   ucDAC2_CRT2_DAC_Adjustment;
+  USHORT  usDAC2_CRT2_FORCE_Data;
+  USHORT  usDAC2_CRT2_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CRT2_MUX_RegisterInfo;     //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_NTSC_BG_Adjustment;
+  UCHAR   ucDAC2_NTSC_DAC_Adjustment;
+  USHORT  usDAC2_TV1_FORCE_Data;
+  USHORT  usDAC2_TV1_MUX_RegisterIndex;
+  UCHAR   ucDAC2_TV1_MUX_RegisterInfo;      //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_CV_BG_Adjustment;
+  UCHAR   ucDAC2_CV_DAC_Adjustment;
+  USHORT  usDAC2_CV_FORCE_Data;
+  USHORT  usDAC2_CV_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CV_MUX_RegisterInfo;       //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_PAL_BG_Adjustment;
+  UCHAR   ucDAC2_PAL_DAC_Adjustment;
+  USHORT  usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+//  ucConnectInfo:
+//    [7:4] - connector type
+//      = 1   - VGA connector
+//      = 2   - DVI-I
+//      = 3   - DVI-D
+//      = 4   - DVI-A
+//      = 5   - SVIDEO
+//      = 6   - COMPOSITE
+//      = 7   - LVDS
+//      = 8   - DIGITAL LINK
+//      = 9   - SCART
+//      = 0xA - HDMI_type A
+//      = 0xB - HDMI_type B
+//      = 0xE - Special case1 (DVI+DIN)
+//      Others=TBD
+//    [3:0] - DAC Associated
+//      = 0   - no DAC
+//      = 1   - DACA
+//      = 2   - DACB
+//      = 3   - External DAC
+//      Others=TBD
+//
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfConnectorType:4;
+  UCHAR   bfAssociatedDAC:4;
+#else
+  UCHAR   bfAssociatedDAC:4;
+  UCHAR   bfConnectorType:4;
+#endif
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+  ATOM_CONNECTOR_INFO sbfAccess;
+  UCHAR               ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
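
A hedged sketch of splitting the packed ucConnectInfo byte described above, using the access union (the helper name is illustrative):

/* Hypothetical helper: split the packed connector-info byte per the table above. */
static void decode_connect_info(UCHAR raw, UCHAR *connector_type, UCHAR *dac)
{
	ATOM_CONNECTOR_INFO_ACCESS ci;

	ci.ucAccess = raw;
	*connector_type = ci.sbfAccess.bfConnectorType;  /* e.g. 2 = DVI-I, 7 = LVDS */
	*dac = ci.sbfAccess.bfAssociatedDAC;             /* 1 = DACA, 2 = DACB, 3 = external DAC */
}
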
+
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                    usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C   asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED       0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+  UCHAR   ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+   USHORT usFrequency;
+   UCHAR  ucPLL_ChargePump;                            // PLL charge-pump gain control
+   UCHAR  ucPLL_DutyCycle;                            // PLL duty cycle control
+   UCHAR  ucPLL_VCO_Gain;                              // PLL VCO gain control
+   UCHAR  ucPLL_VoltageSwing;                         // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;
+
+
+#define ATOM_MAX_MISC_INFO       4
+
+typedef struct _ATOM_TMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT                     usMaxFrequency;             // in 10Khz
+  ATOM_MISC_CONTROL_INFO            asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+  UCHAR ucTVStandard;     //Same as TV standards defined above,
+  UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+  UCHAR ucAttribute;      //Same as other digital encoder attributes defined above
+  UCHAR ucPadding[1];
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+  ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+  ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
+
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;
+  USHORT usEncoderID;
+  UCHAR  ucDeviceType;                                    //Use ATOM_DEVICE_xxx1_Index to indicate device type only.
+  UCHAR  ucAction;                                          //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  ATOM_ENCODER_ATTRIBUTE usDevAttr;
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{
+  DVO_ENCODER_CONTROL_PARAMETERS    sDVOEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+
+#define ATOM_XTMDS_ASIC_SI164_ID        1
+#define ATOM_XTMDS_ASIC_SI178_ID        2
+#define ATOM_XTMDS_ASIC_TFP513_ID       3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK   0x00000002
+#define ATOM_XTMDS_MVPU_FPGA            0x00000004
+
+
+typedef struct _ATOM_XTMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usSingleLinkMaxFrequency;
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;           // I2C ID used to control the external chip
+  UCHAR                      ucXtransimitterID;
+  UCHAR                      ucSupportedLink;    // Bit field, bit0=1, single link supported;bit1=1,dual link supported
+  UCHAR                      ucSequnceAlterID;   // Even with the same external TMDS asic, it's possible that the programming sequence differs
+                                                 // due to design. This ID is used to alert the driver that the sequence is not "standard"!
+  UCHAR                      ucMasterAddress;    // Address to control Master xTMDS Chip
+  UCHAR                      ucSlaveAddress;     // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE=On or ATOM_DISABLE=Off
+  UCHAR ucDevice;                     // ATOM_DEVICE_DFP1_INDEX....
+  UCHAR ucPadding[2];
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+//Definitions for ulPowerPlayMiscInfo
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK                     0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC                  0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC                  0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT            0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH        0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN             0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN          0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN          0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE                 0x00000080L  //When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program
+
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN      0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN         0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN              0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN                 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE     0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE            0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE             0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE                 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE                 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE               0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE              0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK           0x00300000L  //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT          20
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE                 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2      0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4      0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN            0x02000000L  //When set, dynamic HDP block is enabled
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN        0x04000000L  //When set, dynamic MC host block is enabled
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN              0x08000000L  //When set, this mode is for accelerated 3D
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK   0x70000000L  //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT  28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS                0x80000000L
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE            0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT          0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN           0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO            0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE              0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN       0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L  //If this bit is set in multi-pp mode, the driver will pick the mode with the lowest power consumption.
+                                                                      //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE               0x00000200L
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulReserved1;                // must be set to 0
+  ULONG     ulReserved2;                // must be set to 0
+  USHORT    usEngineClock;
+  USHORT    usMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
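
As a hedged example of decoding two of the packed ulMiscInfo fields from such an entry (the helper is illustrative; the table is stored little-endian, hence le32_to_cpu):

/* Hedged sketch: pull two packed fields out of a power-mode entry. */
static void decode_pm_misc(const ATOM_POWERMODE_INFO *pm, UCHAR *fm_level, UCHAR *pp_group)
{
	ULONG misc = le32_to_cpu(pm->ulMiscInfo);

	*fm_level = (misc & ATOM_PM_MISCINFO_FRAME_MODULATION_MASK) >>
		    ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT;	/* 0 = off, 1 = 2-level, 2 = 4-level */
	*pp_group = (misc & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
		    ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;	/* 1 = battery ... 5 = performance */
}
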
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO_V2
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;
+  ULONG     ulEngineClock;
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct  _ATOM_POWERMODE_INFO_V3
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;
+  ULONG     ulEngineClock;
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to Core (VDDC) voltage table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+  UCHAR     ucVDDCI_VoltageDropIndex;   // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN            0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE         0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63      0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032   0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030   0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649   0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64      0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375    0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512   0x07   // Andigilog
+
+
+typedef struct  _ATOM_POWERPLAY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct  _ATOM_POWERPLAY_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct  _ATOM_POWERPLAY_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
+
+
+
+/**************************************************************************/
+
+
+// The following definitions are for compatibility between different SW components.
+#define ATOM_MASTER_DATA_TABLE_REVISION   0x01
+#define Object_Info                       Object_Header
+#define AdjustARB_SEQ                     MC_InitParameter
+#define VRAM_GPIO_DetectionInfo           VoltageObjectInfo
+#define ASIC_VDDCI_Info                   ASIC_ProfilingInfo
+#define ASIC_MVDDQ_Info                   MemoryTrainingInfo
+#define SS_Info                           PPLL_SS_Info
+#define ASIC_MVDDC_Info                   ASIC_InternalSS_Info
+#define DispDevicePriorityInfo            SaveRestoreInfo
+#define DispOutInfo                       TV_VideoMode
+
+
+#define ATOM_ENCODER_OBJECT_TABLE         ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE       ATOM_OBJECT_TABLE
+
+//New device naming; remove them when both DAL and VBIOS are ready
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS    DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT          ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT          ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX            ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX            ATOM_DEVICE_DFP2_INDEX
+
+#define ATOM_DEVICE_DFP2I_INDEX            0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT          (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I                      ATOM_S0_DFP1
+#define ATOM_S0_DFP1X                      ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I                      0x00200000L
+#define ATOM_S0_DFP2Ib2                    0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE           ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE           ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE           0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3         0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1             0x02
+
+#define ATOM_S3_DFP1I_ACTIVE               ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1X_ACTIVE               ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE               0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE          ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE          ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE          0x02000000L
+
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3        0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1            0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I              0x0200
+#define ATOM_S6_ACC_REQ_DFP1I              ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X              ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3            0x02
+#define ATOM_S6_ACC_REQ_DFP2I              0x02000000L
+
+#define TMDS1XEncoderControl               DVOEncoderControl
+#define DFP1XOutputControl                 DVOOutputControl
+
+#define ExternalDFPOutputControl           DFP1XOutputControl
+#define EnableExternalTMDS_Encoder         TMDS1XEncoderControl
+
+#define DFP1IOutputControl                 TMDSAOutputControl
+#define DFP2IOutputControl                 LVTMAOutputControl
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard  ucDacStandard
+#define ucDac2Standard  ucDacStandard
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl   TMDSAOutputControl
+#define DFP2OutputControl   LVTMAOutputControl
+#define CRT1OutputControl   DAC1OutputControl
+#define CRT2OutputControl   DAC2OutputControl
+
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
+#define EnableLVDS_SS   EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE           ATOM_S2_CRT1_DPMS_STATE
+#define ATOM_S2_TV1_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+#define ATOM_S2_DFP1_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+#define ATOM_S2_CRT2_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2             0x00400000L
+#define ATOM_DEVICE_TV2_INDEX           0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT         (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2                     0x00100000L
+#define ATOM_S3_TV2_ACTIVE              ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE         ATOM_S3_DFP6_CRTC_ACTIVE
+
+/*********************************************************************************/
+
+#pragma pack() // BIOS data must use byte alignment
+
+#pragma pack(1)
+
+typedef struct _ATOM_HOLE_INFO
+{
+	USHORT	usOffset;		// offset of the hole ( from the start of the binary )
+	USHORT	usLength;		// length of the hole ( in bytes )
+}ATOM_HOLE_INFO;
+
+typedef struct _ATOM_SERVICE_DESCRIPTION
+{
+   UCHAR   ucRevision;                               // Holes set revision
+   UCHAR   ucAlgorithm;                              // Hash algorithm
+   UCHAR   ucSignatureType;							 // Signature type ( 0 - no signature, 1 - test, 2 - production )
+   UCHAR   ucReserved;
+   USHORT  usSigOffset;							     // Signature offset ( from the start of the binary )
+   USHORT  usSigLength;                              // Signature length
+}ATOM_SERVICE_DESCRIPTION;
+
+
+typedef struct _ATOM_SERVICE_INFO
+{
+      ATOM_COMMON_TABLE_HEADER      asHeader;
+      ATOM_SERVICE_DESCRIPTION		asDescr;
+	  UCHAR							ucholesNo;		// number of holes that follow
+	  ATOM_HOLE_INFO				holes[1];       // array of hole descriptions
+}ATOM_SERVICE_INFO;
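
A minimal sketch of walking the hole list; ucholesNo bounds the [1]-sized tail, and the holes presumably mark byte ranges excluded from the signed portion of the image (the helper is illustrative):

/* Hedged sketch: iterate the variable-length hole list that follows the descriptor. */
static void walk_service_holes(const ATOM_SERVICE_INFO *svc)
{
	int i;

	for (i = 0; i < svc->ucholesNo; i++) {
		USHORT off = le16_to_cpu(svc->holes[i].usOffset);
		USHORT len = le16_to_cpu(svc->holes[i].usLength);
		/* bytes [off, off + len) are a hole in the image (presumably skipped when hashing) */
		(void)off;
		(void)len;
	}
}
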
+
+
+
+#pragma pack() // BIOS data must use byte alignment
+
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+  ULONG Signature;
+  ULONG TableLength;      //Length
+  UCHAR Revision;
+  UCHAR Checksum;
+  UCHAR OemId[6];
+  UCHAR OemTableId[8];    //UINT64  OemTableId;
+  ULONG OemRevision;
+  ULONG CreatorId;
+  ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+  UINT32  Signature;       //0x0
+  UINT32  Length;          //0x4
+  UINT8   Revision;        //0x8
+  UINT8   Checksum;        //0x9
+  UINT8   OemId[6];        //0xA
+  UINT64  OemTableId;      //0x10
+  UINT32  OemRevision;     //0x18
+  UINT32  CreatorId;       //0x1C
+  UINT32  CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+  AMD_ACPI_DESCRIPTION_HEADER SHeader;
+  UCHAR TableUUID[16];    //0x24
+  ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+  ULONG Lib1ImageOffset;  //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+  ULONG Reserved[4];      //0x3C
+}UEFI_ACPI_VFCT;
+
+typedef struct {
+  ULONG  PCIBus;          //0x4C
+  ULONG  PCIDevice;       //0x50
+  ULONG  PCIFunction;     //0x54
+  USHORT VendorID;        //0x58
+  USHORT DeviceID;        //0x5A
+  USHORT SSVID;           //0x5C
+  USHORT SSID;            //0x5E
+  ULONG  Revision;        //0x60
+  ULONG  ImageLength;     //0x64
+}VFCT_IMAGE_HEADER;
+
+
+typedef struct {
+  VFCT_IMAGE_HEADER   VbiosHeader;
+  UCHAR   VbiosContent[1];
+}GOP_VBIOS_CONTENT;
+
+typedef struct {
+  VFCT_IMAGE_HEADER   Lib1Header;
+  UCHAR   Lib1Content[1];
+}GOP_LIB1_CONTENT;
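
These structures describe how a VBIOS image is published through the ACPI VFCT table. A simplified, illustrative walker for an already-mapped table (bounds checking and endian conversion omitted; ACPI tables are little-endian, so direct field reads are fine on little-endian hosts):

/* Hypothetical helper: find the ROM image for a given PCI vendor/device ID. */
static UCHAR *vfct_find_vbios(UEFI_ACPI_VFCT *vfct, USHORT vendor, USHORT device)
{
	ULONG offset = vfct->VBIOSImageOffset;

	while (offset < vfct->SHeader.TableLength) {
		GOP_VBIOS_CONTENT *img = (GOP_VBIOS_CONTENT *)((UCHAR *)vfct + offset);
		VFCT_IMAGE_HEADER *hdr = &img->VbiosHeader;

		if (!hdr->ImageLength)
			break;
		if (hdr->VendorID == vendor && hdr->DeviceID == device)
			return img->VbiosContent;	/* ImageLength bytes of ROM follow */

		offset += sizeof(VFCT_IMAGE_HEADER) + hdr->ImageLength;
	}
	return NULL;
}
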
+
+#pragma pack()
+
+
+#endif /* _ATOMBIOS_H */
+
+#include "pptable.h"
+

+ 807 - 0
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c

@@ -0,0 +1,807 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_fixed.h>
+#include "amdgpu.h"
+#include "atom.h"
+#include "atom-bits.h"
+#include "atombios_encoders.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+
+void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+	int a1, a2;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = amdgpu_crtc->crtc_id;
+
+	switch (amdgpu_crtc->rmx_type) {
+	case RMX_CENTER:
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		break;
+	case RMX_ASPECT:
+		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+		if (a1 > a2) {
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+		} else if (a2 > a1) {
+			args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+		}
+		break;
+	case RMX_FULL:
+	default:
+		args.usOverscanRight = cpu_to_le16(amdgpu_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(amdgpu_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(amdgpu_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
+		break;
+	}
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
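
For a concrete feel of the RMX_ASPECT branch: scaling a 1280x1024 mode onto a 1920x1080 panel gives a1 = 1024 * 1920 = 1966080 and a2 = 1080 * 1280 = 1382400, so a1 > a2 and the left/right overscan each become (1920 - 1382400/1024) / 2 = 285 pixels, preserving the 5:4 source aspect while the vertical overscan stays zero.
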
+
+void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	ENABLE_SCALER_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucScaler = amdgpu_crtc->crtc_id;
+
+	switch (amdgpu_crtc->rmx_type) {
+	case RMX_FULL:
+		args.ucEnable = ATOM_SCALER_EXPANSION;
+		break;
+	case RMX_CENTER:
+		args.ucEnable = ATOM_SCALER_CENTER;
+		break;
+	case RMX_ASPECT:
+		args.ucEnable = ATOM_SCALER_EXPANSION;
+		break;
+	default:
+		args.ucEnable = ATOM_SCALER_DISABLE;
+		break;
+	}
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int index =
+	    GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = amdgpu_crtc->crtc_id;
+	args.ucEnable = lock;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = amdgpu_crtc->crtc_id;
+	args.ucEnable = state;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+	BLANK_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = amdgpu_crtc->crtc_id;
+	args.ucBlanking = state;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
+	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucDispPipeId = amdgpu_crtc->crtc_id;
+	args.ucEnable = state;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
+{
+	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
+	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucEnable = ATOM_INIT;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (amdgpu_crtc->h_border * 2));
+	args.usH_Blanking_Time =
+		cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (amdgpu_crtc->h_border * 2));
+	args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (amdgpu_crtc->v_border * 2));
+	args.usV_Blanking_Time =
+		cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (amdgpu_crtc->v_border * 2));
+	args.usH_SyncOffset =
+		cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + amdgpu_crtc->h_border);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_SyncOffset =
+		cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + amdgpu_crtc->v_border);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+	args.ucH_Border = amdgpu_crtc->h_border;
+	args.ucV_Border = amdgpu_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = amdgpu_crtc->crtc_id;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union atom_enable_ss {
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
+};
+
+static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
+				     int enable,
+				     int pll_id,
+				     int crtc_id,
+				     struct amdgpu_atom_ss *ss)
+{
+	unsigned i;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+	union atom_enable_ss args;
+
+	if (enable) {
+		/* Don't mess with SS if percentage is 0 or external ss.
+		 * SS is already disabled previously, and disabling it
+		 * again can cause display problems if the pll is already
+		 * programmed.
+		 */
+		if (ss->percentage == 0)
+			return;
+		if (ss->type & ATOM_EXTERNAL_SS_MASK)
+			return;
+	} else {
+		for (i = 0; i < adev->mode_info.num_crtc; i++) {
+			if (adev->mode_info.crtcs[i] &&
+			    adev->mode_info.crtcs[i]->enabled &&
+			    i != crtc_id &&
+			    pll_id == adev->mode_info.crtcs[i]->pll_id) {
+				/* one other crtc is using this pll don't turn
+				 * off spread spectrum as it might turn off
+				 * display on active crtc
+				 */
+				return;
+			}
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+	args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+	switch (pll_id) {
+	case ATOM_PPLL1:
+		args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+		break;
+	case ATOM_PPLL2:
+		args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+		break;
+	case ATOM_DCPLL:
+		args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+		break;
+	case ATOM_PPLL_INVALID:
+		return;
+	}
+	args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+	args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+	args.v3.ucEnable = enable;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union adjust_pixel_clock {
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
+};
+
+static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_encoder *encoder = amdgpu_crtc->encoder;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+	u32 adjusted_clock = mode->clock;
+	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+	u32 dp_clock = mode->clock;
+	u32 clock = mode->clock;
+	int bpc = amdgpu_crtc->bpc;
+	bool is_duallink = amdgpu_dig_monitor_is_duallink(encoder, mode->clock);
+	union adjust_pixel_clock args;
+	u8 frev, crev;
+	int index;
+
+	amdgpu_crtc->pll_flags = AMDGPU_PLL_USE_FRAC_FB_DIV;
+
+	if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+		if (connector) {
+			struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+			struct amdgpu_connector_atom_dig *dig_connector =
+				amdgpu_connector->con_priv;
+
+			dp_clock = dig_connector->dp_clock;
+		}
+	}
+
+	/* use recommended ref_div for ss */
+	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (amdgpu_crtc->ss_enabled) {
+			if (amdgpu_crtc->ss.refdiv) {
+				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
+				amdgpu_crtc->pll_reference_div = amdgpu_crtc->ss.refdiv;
+				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
+			}
+		}
+	}
+
+	/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+	if (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+		adjusted_clock = mode->clock * 2;
+	if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		amdgpu_crtc->pll_flags |= AMDGPU_PLL_PREFER_CLOSEST_LOWER;
+	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		amdgpu_crtc->pll_flags |= AMDGPU_PLL_IS_LCD;
+
+
+	/* adjust pll for deep color modes */
+	if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+		switch (bpc) {
+		case 8:
+		default:
+			break;
+		case 10:
+			clock = (clock * 5) / 4;
+			break;
+		case 12:
+			clock = (clock * 3) / 2;
+			break;
+		case 16:
+			clock = clock * 2;
+			break;
+		}
+	}
+
+	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+	 * accordingly based on the encoder/transmitter to work around
+	 * special hw requirements.
+	 */
+	index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return adjusted_clock;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+		case 2:
+			args.v1.usPixelClock = cpu_to_le16(clock / 10);
+			args.v1.ucTransmitterID = amdgpu_encoder->encoder_id;
+			args.v1.ucEncodeMode = encoder_mode;
+			if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
+				args.v1.ucConfig |=
+					ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					   index, (uint32_t *)&args);
+			adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+			break;
+		case 3:
+			args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
+			args.v3.sInput.ucTransmitterID = amdgpu_encoder->encoder_id;
+			args.v3.sInput.ucEncodeMode = encoder_mode;
+			args.v3.sInput.ucDispPllConfig = 0;
+			if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
+				args.v3.sInput.ucDispPllConfig |=
+					DISPPLL_CONFIG_SS_ENABLE;
+			if (ENCODER_MODE_IS_DP(encoder_mode)) {
+				args.v3.sInput.ucDispPllConfig |=
+					DISPPLL_CONFIG_COHERENT_MODE;
+				/* 16200 or 27000 */
+				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+			} else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+				if (dig->coherent_mode)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_COHERENT_MODE;
+				if (is_duallink)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_DUAL_LINK;
+			}
+			if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+			    ENCODER_OBJECT_ID_NONE)
+				args.v3.sInput.ucExtTransmitterID =
+					amdgpu_encoder_get_dp_bridge_encoder_id(encoder);
+			else
+				args.v3.sInput.ucExtTransmitterID = 0;
+
+			amdgpu_atom_execute_table(adev->mode_info.atom_context,
+					   index, (uint32_t *)&args);
+			adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+			if (args.v3.sOutput.ucRefDiv) {
+				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
+				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
+				amdgpu_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
+			}
+			if (args.v3.sOutput.ucPostDiv) {
+				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
+				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_POST_DIV;
+				amdgpu_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return adjusted_clock;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return adjusted_clock;
+	}
+
+	return adjusted_clock;
+}
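
For the deep-color adjustment above, the scaling is plain arithmetic on the pixel clock: a 148500 kHz HDMI mode becomes 185625 kHz at 10 bpc (x5/4) and 222750 kHz at 12 bpc (x3/2), while 8 bpc leaves it unchanged.
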
+
+union set_pixel_clock {
+	SET_PIXEL_CLOCK_PS_ALLOCATION base;
+	PIXEL_CLOCK_PARAMETERS v1;
+	PIXEL_CLOCK_PARAMETERS_V2 v2;
+	PIXEL_CLOCK_PARAMETERS_V3 v3;
+	PIXEL_CLOCK_PARAMETERS_V5 v5;
+	PIXEL_CLOCK_PARAMETERS_V6 v6;
+};
+
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
+				    u32 dispclk)
+{
+	u8 frev, crev;
+	int index;
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 5:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v5.ucCRTC = ATOM_CRTC_INVALID;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
+			args.v5.ucPpll = ATOM_DCPLL;
+			break;
+		case 6:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+			args.v6.ucPpll = ATOM_EXT_PLL1;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
+{
+	if (ENCODER_MODE_IS_DP(encoder_mode)) {
+		if (pll_id < ATOM_EXT_PLL1)
+			return true;
+		else
+			return false;
+	} else {
+		return true;
+	}
+}
+
+void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
+			       u32 crtc_id,
+			       int pll_id,
+			       u32 encoder_mode,
+			       u32 encoder_id,
+			       u32 clock,
+			       u32 ref_div,
+			       u32 fb_div,
+			       u32 frac_fb_div,
+			       u32 post_div,
+			       int bpc,
+			       bool ss_enabled,
+			       struct amdgpu_atom_ss *ss)
+{
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	u8 frev, crev;
+	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			if (clock == ATOM_DISABLE)
+				return;
+			args.v1.usPixelClock = cpu_to_le16(clock / 10);
+			args.v1.usRefDiv = cpu_to_le16(ref_div);
+			args.v1.usFbDiv = cpu_to_le16(fb_div);
+			args.v1.ucFracFbDiv = frac_fb_div;
+			args.v1.ucPostDiv = post_div;
+			args.v1.ucPpll = pll_id;
+			args.v1.ucCRTC = crtc_id;
+			args.v1.ucRefDivSrc = 1;
+			break;
+		case 2:
+			args.v2.usPixelClock = cpu_to_le16(clock / 10);
+			args.v2.usRefDiv = cpu_to_le16(ref_div);
+			args.v2.usFbDiv = cpu_to_le16(fb_div);
+			args.v2.ucFracFbDiv = frac_fb_div;
+			args.v2.ucPostDiv = post_div;
+			args.v2.ucPpll = pll_id;
+			args.v2.ucCRTC = crtc_id;
+			args.v2.ucRefDivSrc = 1;
+			break;
+		case 3:
+			args.v3.usPixelClock = cpu_to_le16(clock / 10);
+			args.v3.usRefDiv = cpu_to_le16(ref_div);
+			args.v3.usFbDiv = cpu_to_le16(fb_div);
+			args.v3.ucFracFbDiv = frac_fb_div;
+			args.v3.ucPostDiv = post_div;
+			args.v3.ucPpll = pll_id;
+			if (crtc_id == ATOM_CRTC2)
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+			else
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+			args.v3.ucTransmitterId = encoder_id;
+			args.v3.ucEncoderMode = encoder_mode;
+			break;
+		case 5:
+			args.v5.ucCRTC = crtc_id;
+			args.v5.usPixelClock = cpu_to_le16(clock / 10);
+			args.v5.ucRefDiv = ref_div;
+			args.v5.usFbDiv = cpu_to_le16(fb_div);
+			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v5.ucPostDiv = post_div;
+			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
+			    (pll_id < ATOM_EXT_PLL1))
+				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					/* yes this is correct, the atom define is wrong */
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
+					break;
+				case 12:
+					/* yes this is correct, the atom define is wrong */
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+					break;
+				}
+			}
+			args.v5.ucTransmitterID = encoder_id;
+			args.v5.ucEncoderMode = encoder_mode;
+			args.v5.ucPpll = pll_id;
+			break;
+		case 6:
+			args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
+			args.v6.ucRefDiv = ref_div;
+			args.v6.usFbDiv = cpu_to_le16(fb_div);
+			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v6.ucPostDiv = post_div;
+			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
+			    (pll_id < ATOM_EXT_PLL1) &&
+			    !is_pixel_clock_source_from_pll(encoder_mode, pll_id))
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+					break;
+				case 12:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+					break;
+				case 16:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+					break;
+				}
+			}
+			args.v6.ucTransmitterID = encoder_id;
+			args.v6.ucEncoderMode = encoder_mode;
+			args.v6.ucPpll = pll_id;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder =
+		to_amdgpu_encoder(amdgpu_crtc->encoder);
+	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
+
+	amdgpu_crtc->bpc = 8;
+	amdgpu_crtc->ss_enabled = false;
+
+	if ((amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (amdgpu_encoder_get_dp_bridge_encoder_id(amdgpu_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
+		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+		struct drm_connector *connector =
+			amdgpu_get_connector_for_encoder(amdgpu_crtc->encoder);
+		struct amdgpu_connector *amdgpu_connector =
+			to_amdgpu_connector(connector);
+		struct amdgpu_connector_atom_dig *dig_connector =
+			amdgpu_connector->con_priv;
+		int dp_clock;
+
+		/* Assign mode clock for hdmi deep color max clock limit check */
+		amdgpu_connector->pixelclock_for_modeset = mode->clock;
+		amdgpu_crtc->bpc = amdgpu_connector_get_monitor_bpc(connector);
+
+		switch (encoder_mode) {
+		case ATOM_ENCODER_MODE_DP_MST:
+		case ATOM_ENCODER_MODE_DP:
+			/* DP/eDP */
+			dp_clock = dig_connector->dp_clock / 10;
+			amdgpu_crtc->ss_enabled =
+				amdgpu_atombios_get_asic_ss_info(adev, &amdgpu_crtc->ss,
+								 ASIC_INTERNAL_SS_ON_DP,
+								 dp_clock);
+			break;
+		case ATOM_ENCODER_MODE_LVDS:
+			amdgpu_crtc->ss_enabled =
+				amdgpu_atombios_get_asic_ss_info(adev,
+								 &amdgpu_crtc->ss,
+								 dig->lcd_ss_id,
+								 mode->clock / 10);
+			break;
+		case ATOM_ENCODER_MODE_DVI:
+			amdgpu_crtc->ss_enabled =
+				amdgpu_atombios_get_asic_ss_info(adev,
+								 &amdgpu_crtc->ss,
+								 ASIC_INTERNAL_SS_ON_TMDS,
+								 mode->clock / 10);
+			break;
+		case ATOM_ENCODER_MODE_HDMI:
+			amdgpu_crtc->ss_enabled =
+				amdgpu_atombios_get_asic_ss_info(adev,
+								 &amdgpu_crtc->ss,
+								 ASIC_INTERNAL_SS_ON_HDMI,
+								 mode->clock / 10);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* adjust pixel clock as needed */
+	amdgpu_crtc->adjusted_clock = amdgpu_atombios_crtc_adjust_pll(crtc, mode);
+
+	return 0;
+}
+
+void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder =
+		to_amdgpu_encoder(amdgpu_crtc->encoder);
+	u32 pll_clock = mode->clock;
+	u32 clock = mode->clock;
+	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+	struct amdgpu_pll *pll;
+	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
+
+	/* pass the actual clock to amdgpu_atombios_crtc_program_pll for HDMI */
+	if ((encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
+	    (amdgpu_crtc->bpc > 8))
+		clock = amdgpu_crtc->adjusted_clock;
+
+	switch (amdgpu_crtc->pll_id) {
+	case ATOM_PPLL1:
+		pll = &adev->clock.ppll[0];
+		break;
+	case ATOM_PPLL2:
+		pll = &adev->clock.ppll[1];
+		break;
+	case ATOM_PPLL0:
+	case ATOM_PPLL_INVALID:
+	default:
+		pll = &adev->clock.ppll[2];
+		break;
+	}
+
+	/* update pll params */
+	pll->flags = amdgpu_crtc->pll_flags;
+	pll->reference_div = amdgpu_crtc->pll_reference_div;
+	pll->post_div = amdgpu_crtc->pll_post_div;
+
+	amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
+			    &fb_div, &frac_fb_div, &ref_div, &post_div);
+
+	amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
+				 amdgpu_crtc->crtc_id, &amdgpu_crtc->ss);
+
+	amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+				  encoder_mode, amdgpu_encoder->encoder_id, clock,
+				  ref_div, fb_div, frac_fb_div, post_div,
+				  amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
+
+	if (amdgpu_crtc->ss_enabled) {
+		/* calculate ss amount and step size */
+		u32 step_size;
+		u32 amount = (((fb_div * 10) + frac_fb_div) *
+			      (u32)amdgpu_crtc->ss.percentage) /
+			(100 * (u32)amdgpu_crtc->ss.percentage_divider);
+		amdgpu_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+		amdgpu_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+			ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
+		if (amdgpu_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+			step_size = (4 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) /
+				(125 * 25 * pll->reference_freq / 100);
+		else
+			step_size = (2 * amount * ref_div * ((u32)amdgpu_crtc->ss.rate * 2048)) /
+				(125 * 25 * pll->reference_freq / 100);
+		amdgpu_crtc->ss.step = step_size;
+
+		amdgpu_atombios_crtc_program_ss(adev, ATOM_ENABLE, amdgpu_crtc->pll_id,
+					 amdgpu_crtc->crtc_id, &amdgpu_crtc->ss);
+	}
+}
+

+ 58 - 0
drivers/gpu/drm/amd/amdgpu/atombios_crtc.h

@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ATOMBIOS_CRTC_H__
+#define __ATOMBIOS_CRTC_H__
+
+void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode);
+void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc);
+void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock);
+void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state);
+void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state);
+void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state);
+void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev);
+void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode);
+void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
+				    u32 dispclk);
+void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
+			       u32 crtc_id,
+			       int pll_id,
+			       u32 encoder_mode,
+			       u32 encoder_id,
+			       u32 clock,
+			       u32 ref_div,
+			       u32 fb_div,
+			       u32 frac_fb_div,
+			       u32 post_div,
+			       int bpc,
+			       bool ss_enabled,
+			       struct amdgpu_atom_ss *ss);
+int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode);
+void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc,
+			   struct drm_display_mode *mode);
+
+#endif

+ 775 - 0
drivers/gpu/drm/amd/amdgpu/atombios_dp.c

@@ -0,0 +1,775 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "atombios_encoders.h"
+#include "atombios_dp.h"
+#include "amdgpu_connectors.h"
+#include "amdgpu_atombios.h"
+#include <drm/drm_dp_helper.h>
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
+
+static char *voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+/***** amdgpu AUX functions *****/
+
+union aux_channel_transaction {
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
+
+static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
+				      u8 *send, int send_bytes,
+				      u8 *recv, int recv_size,
+				      u8 delay, u8 *ack)
+{
+	struct drm_device *dev = chan->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	union aux_channel_transaction args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+	unsigned char *base;
+	int recv_bytes;
+	int r = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	mutex_lock(&chan->mutex);
+
+	base = (unsigned char *)(adev->mode_info.atom_context->scratch + 1);
+
+	amdgpu_atombios_copy_swap(base, send, send_bytes, true);
+
+	args.v2.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+	args.v2.lpDataOut = cpu_to_le16((u16)(16 + 4));
+	args.v2.ucDataOutLen = 0;
+	args.v2.ucChannelID = chan->rec.i2c_id;
+	args.v2.ucDelay = delay / 10;
+	args.v2.ucHPD_ID = chan->rec.hpd;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*ack = args.v2.ucReplyStatus;
+
+	/* timeout */
+	if (args.v2.ucReplyStatus == 1) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
+		r = -ETIMEDOUT;
+		goto done;
+	}
+
+	/* flags not zero */
+	if (args.v2.ucReplyStatus == 2) {
+		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
+		r = -EIO;
+		goto done;
+	}
+
+	/* error */
+	if (args.v2.ucReplyStatus == 3) {
+		DRM_DEBUG_KMS("dp_aux_ch error\n");
+		r = -EIO;
+		goto done;
+	}
+
+	recv_bytes = args.v1.ucDataOutLen;
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	if (recv && recv_size)
+		amdgpu_atombios_copy_swap(recv, base + 16, recv_bytes, false);
+
+	r = recv_bytes;
+done:
+	mutex_unlock(&chan->mutex);
+
+	return r;
+}
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
+static ssize_t
+amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct amdgpu_i2c_chan *chan =
+		container_of(aux, struct amdgpu_i2c_chan, aux);
+	int ret;
+	u8 tx_buf[20];
+	size_t tx_size;
+	u8 ack, delay = 0;
+
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
+	tx_buf[0] = msg->address & 0xff;
+	tx_buf[1] = msg->address >> 8;
+	tx_buf[2] = msg->request << 4;
+	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+		/* tx_size needs to be 4 even for bare address packets since the atom
+		 * table needs the info in tx_buf[3].
+		 */
+		tx_size = HEADER_SIZE + msg->size;
+		if (msg->size == 0)
+			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
+		else
+			tx_buf[3] |= tx_size << 4;
+		memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
+		ret = amdgpu_atombios_dp_process_aux_ch(chan,
+						 tx_buf, tx_size, NULL, 0, delay, &ack);
+		if (ret >= 0)
+			/* Return payload size. */
+			ret = msg->size;
+		break;
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		/* tx_size needs to be 4 even for bare address packets since the atom
+		 * table needs the info in tx_buf[3].
+		 */
+		tx_size = HEADER_SIZE;
+		if (msg->size == 0)
+			tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
+		else
+			tx_buf[3] |= tx_size << 4;
+		ret = amdgpu_atombios_dp_process_aux_ch(chan,
+						 tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret >= 0)
+		msg->reply = ack >> 4;
+
+	return ret;
+}
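
As an illustrative walk-through (derived from the packing code above, not text from the patch): a one-byte native read of DP_DPCD_REV (address 0x000, request DP_AUX_NATIVE_READ = 0x9) ends up as tx_buf = { 0x00, 0x00, 0x90, 0x40 } with tx_size = HEADER_SIZE (4); the 0x40 is tx_size shifted into the high nibble of tx_buf[3] (msg->size - 1 = 0 in the low nibble), and the single reply byte is copied back out of the ATOM scratch area at offset 16 by amdgpu_atombios_dp_process_aux_ch().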
+
+void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
+{
+	int ret;
+
+	amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
+	amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+	amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
+	ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+	if (!ret)
+		amdgpu_connector->ddc_bus->has_aux = true;
+
+	WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+}
+
+/***** general DP utility functions *****/
+
+#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
+#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPH_LEVEL_3
+
+static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
+						int lane_count,
+						u8 train_set[4])
+{
+	u8 v = 0;
+	u8 p = 0;
+	int lane;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+			  lane,
+			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= DP_VOLTAGE_MAX)
+		v |= DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p >= DP_PRE_EMPHASIS_MAX)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
+		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+	for (lane = 0; lane < 4; lane++)
+		train_set[lane] = v | p;
+}
+
+/* convert bits per color (as reported via the EDID) to bits per pixel;
+ * bpc == 0 means the EDID did not report it, so assume 24 bpp
+ */
+static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
+{
+	if (bpc == 0)
+		return 24;
+	else
+		return bpc * 3;
+}
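
As a quick sanity check of the helper above: an EDID that reports nothing gives bpc == 0 and the function falls back to 24 bpp (8 bits per color x 3 components), while a 10 bpc panel maps to 30 bpp.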
+
+/* get the max pix clock supported by the link rate and lane num */
+static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
+					    int lane_num,
+					    int bpp)
+{
+	return (link_rate * lane_num * 8) / bpp;
+}
+
+/***** amdgpu specific DP functions *****/
+
+/* First get the min lane# when low rate is used according to pixel clock
+ * (prefer low rate), second check max lane# supported by DP panel,
+ * if the max lane# < low rate lane# then use max lane# instead.
+ */
+static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
+						 const u8 dpcd[DP_DPCD_SIZE],
+						 int pix_clock)
+{
+	int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+	int max_link_rate = drm_dp_max_link_rate(dpcd);
+	int max_lane_num = drm_dp_max_lane_count(dpcd);
+	int lane_num;
+	int max_dp_pix_clock;
+
+	for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+		max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+		if (pix_clock <= max_dp_pix_clock)
+			break;
+	}
+
+	return lane_num;
+}
+
+static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
+						const u8 dpcd[DP_DPCD_SIZE],
+						int pix_clock)
+{
+	int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
+	int lane_num, max_pix_clock;
+
+	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
+		return 270000;
+
+	lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+	max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+	if (pix_clock <= max_pix_clock)
+		return 162000;
+	max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+	if (pix_clock <= max_pix_clock)
+		return 270000;
+	if (amdgpu_connector_is_dp12_capable(connector)) {
+		max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+		if (pix_clock <= max_pix_clock)
+			return 540000;
+	}
+
+	return drm_dp_max_link_rate(dpcd);
+}
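
To make the selection policy above concrete, here is a hedged, self-contained sketch (illustrative only, not code from the patch) that walks a hypothetical 148500 kHz (1080p60) mode at 24 bpp against a sink advertising 4 lanes and a 270000 kHz max link rate; it settles on 2 lanes at 270000 kHz, mirroring the two helpers above.

	#include <stdio.h>

	/* Same formula as amdgpu_atombios_dp_get_max_dp_pix_clock() above. */
	static int max_pix_clock(int link_rate, int lane_num, int bpp)
	{
		return (link_rate * lane_num * 8) / bpp;
	}

	int main(void)
	{
		int pix_clock = 148500, bpp = 24, max_lanes = 4, lanes, rate;

		/* Prefer the lowest lane count that carries the mode at the max rate:
		 * 1 lane gives 90000 kHz (too low), 2 lanes give 180000 kHz (fits).
		 */
		for (lanes = 1; lanes < max_lanes; lanes <<= 1)
			if (pix_clock <= max_pix_clock(270000, lanes, bpp))
				break;

		/* Then the lowest standard rate that still fits at that lane count:
		 * 162000 kHz * 2 lanes gives only 108000 kHz, so use 270000 kHz.
		 */
		rate = (pix_clock <= max_pix_clock(162000, lanes, bpp)) ? 162000 : 270000;

		printf("lanes=%d, link rate=%d kHz\n", lanes, rate); /* lanes=2, link rate=270000 kHz */
		return 0;
	}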
+
+static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
+				      int action, int dp_clock,
+				      u8 ucconfig, u8 lane_num)
+{
+	DP_ENCODER_SERVICE_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+	memset(&args, 0, sizeof(args));
+	args.ucLinkClock = dp_clock / 10;
+	args.ucConfig = ucconfig;
+	args.ucAction = action;
+	args.ucLaneNum = lane_num;
+	args.ucStatus = 0;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+	return args.ucStatus;
+}
+
+u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
+{
+	struct drm_device *dev = amdgpu_connector->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+					   amdgpu_connector->ddc_bus->rec.i2c_id, 0);
+}
+
+static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connector)
+{
+	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
+	u8 buf[3];
+
+	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
+int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
+{
+	struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
+	u8 msg[DP_DPCD_SIZE];
+	int ret, i;
+
+	for (i = 0; i < 7; i++) {
+		ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+				       DP_DPCD_SIZE);
+		if (ret == DP_DPCD_SIZE) {
+			memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+
+			DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+				      dig_connector->dpcd);
+
+			amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+
+			return 0;
+		}
+	}
+	dig_connector->dpcd[0] = 0;
+	return -EINVAL;
+}
+
+int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
+			       struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *dig_connector;
+	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+	u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
+	u8 tmp;
+
+	if (!amdgpu_connector->con_priv)
+		return panel_mode;
+
+	dig_connector = amdgpu_connector->con_priv;
+
+	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+		/* DP bridge chips */
+		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
+				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+			if (tmp & 1)
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+			else
+				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+		}
+	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		/* eDP */
+		if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
+				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+			if (tmp & 1)
+				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+		}
+	}
+
+	return panel_mode;
+}
+
+void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
+				 const struct drm_display_mode *mode)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *dig_connector;
+
+	if (!amdgpu_connector->con_priv)
+		return;
+	dig_connector = amdgpu_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+		dig_connector->dp_clock =
+			amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+		dig_connector->dp_lane_count =
+			amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+	}
+}
+
+int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *dig_connector;
+	int dp_clock;
+
+	if (!amdgpu_connector->con_priv)
+		return MODE_CLOCK_HIGH;
+	dig_connector = amdgpu_connector->con_priv;
+
+	dp_clock =
+		amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+
+	if ((dp_clock == 540000) &&
+	    (!amdgpu_connector_is_dp12_capable(connector)))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector)
+{
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
+
+	if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
+	    <= 0)
+		return false;
+	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
+		return false;
+	return true;
+}
+
+void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
+				    u8 power_state)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct amdgpu_connector_atom_dig *dig_connector;
+
+	if (!amdgpu_connector->con_priv)
+		return;
+
+	dig_connector = amdgpu_connector->con_priv;
+
+	/* power up/down the sink */
+	if (dig_connector->dpcd[0] >= 0x11) {
+		drm_dp_dpcd_writeb(&amdgpu_connector->ddc_bus->aux,
+				   DP_SET_POWER, power_state);
+		usleep_range(1000, 2000);
+	}
+}
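
This file only powers the sink up (DP_SET_POWER_D0, in the link-training init further down); a caller that wanted to power the sink down before disabling the link could, hypothetically, reuse the same helper:

	/* hypothetical usage sketch, assuming a struct drm_connector *connector */
	amdgpu_atombios_dp_set_rx_power_state(connector, DP_SET_POWER_D3);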
+
+struct amdgpu_atombios_dp_link_train_info {
+	struct amdgpu_device *adev;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int dp_clock;
+	int dp_lane_count;
+	bool tp3_supported;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 train_set[4];
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	u8 tries;
+	struct drm_dp_aux *aux;
+};
+
+static void
+amdgpu_atombios_dp_update_vs_emph(struct amdgpu_atombios_dp_link_train_info *dp_info)
+{
+	/* set the initial vs/emph on the source */
+	amdgpu_atombios_encoder_setup_dig_transmitter(dp_info->encoder,
+					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+					       0, dp_info->train_set[0]); /* sets all lanes at once */
+
+	/* set the vs/emph on the sink */
+	drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
+			  dp_info->train_set, dp_info->dp_lane_count);
+}
+
+static void
+amdgpu_atombios_dp_set_tp(struct amdgpu_atombios_dp_link_train_info *dp_info, int tp)
+{
+	int rtp = 0;
+
+	/* set training pattern on the source */
+	switch (tp) {
+	case DP_TRAINING_PATTERN_1:
+		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
+		break;
+	case DP_TRAINING_PATTERN_2:
+		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
+		break;
+	case DP_TRAINING_PATTERN_3:
+		rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
+		break;
+	}
+	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder, rtp, 0);
+
+	/* enable training pattern on the sink */
+	drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
+}
+
+static int
+amdgpu_atombios_dp_link_train_init(struct amdgpu_atombios_dp_link_train_info *dp_info)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(dp_info->encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	u8 tmp;
+
+	/* power up the sink */
+	amdgpu_atombios_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
+
+	/* possibly enable downspread on the sink */
+	if (dp_info->dpcd[3] & 0x1)
+		drm_dp_dpcd_writeb(dp_info->aux,
+				   DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+	else
+		drm_dp_dpcd_writeb(dp_info->aux,
+				   DP_DOWNSPREAD_CTRL, 0);
+
+	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
+		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
+
+	/* set the lane count on the sink */
+	tmp = dp_info->dp_lane_count;
+	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
+		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
+
+	/* set the link rate on the sink */
+	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
+	drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
+
+	/* start training on the source */
+	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
+
+	/* disable the training pattern on the sink */
+	drm_dp_dpcd_writeb(dp_info->aux,
+			   DP_TRAINING_PATTERN_SET,
+			   DP_TRAINING_PATTERN_DISABLE);
+
+	return 0;
+}
+
+static int
+amdgpu_atombios_dp_link_train_finish(struct amdgpu_atombios_dp_link_train_info *dp_info)
+{
+	udelay(400);
+
+	/* disable the training pattern on the sink */
+	drm_dp_dpcd_writeb(dp_info->aux,
+			   DP_TRAINING_PATTERN_SET,
+			   DP_TRAINING_PATTERN_DISABLE);
+
+	/* disable the training pattern on the source */
+	amdgpu_atombios_encoder_setup_dig_encoder(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
+
+	return 0;
+}
+
+static int
+amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_info)
+{
+	bool clock_recovery;
+	u8 voltage;
+	int i;
+
+	amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
+	memset(dp_info->train_set, 0, 4);
+	amdgpu_atombios_dp_update_vs_emph(dp_info);
+
+	udelay(400);
+
+	/* clock recovery loop */
+	clock_recovery = false;
+	dp_info->tries = 0;
+	voltage = 0xff;
+	while (1) {
+		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+
+		if (drm_dp_dpcd_read_link_status(dp_info->aux,
+						 dp_info->link_status) <= 0) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			clock_recovery = true;
+			break;
+		}
+
+		for (i = 0; i < dp_info->dp_lane_count; i++) {
+			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		}
+		if (i == dp_info->dp_lane_count) {
+			DRM_ERROR("clock recovery reached max voltage\n");
+			break;
+		}
+
+		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++dp_info->tries;
+			if (dp_info->tries == 5) {
+				DRM_ERROR("clock recovery tried 5 times\n");
+				break;
+			}
+		} else
+			dp_info->tries = 0;
+
+		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new train_set as requested by sink */
+		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
+					     dp_info->train_set);
+
+		amdgpu_atombios_dp_update_vs_emph(dp_info);
+	}
+	if (!clock_recovery) {
+		DRM_ERROR("clock recovery failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+static int
+amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_info)
+{
+	bool channel_eq;
+
+	if (dp_info->tp3_supported)
+		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
+	else
+		amdgpu_atombios_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
+
+	/* channel equalization loop */
+	dp_info->tries = 0;
+	channel_eq = false;
+	while (1) {
+		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+
+		if (drm_dp_dpcd_read_link_status(dp_info->aux,
+						 dp_info->link_status) <= 0) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times */
+		if (dp_info->tries > 5) {
+			DRM_ERROR("channel eq failed: 5 tries\n");
+			break;
+		}
+
+		/* Compute new train_set as requested by sink */
+		amdgpu_atombios_dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count,
+					     dp_info->train_set);
+
+		amdgpu_atombios_dp_update_vs_emph(dp_info);
+		dp_info->tries++;
+	}
+
+	if (!channel_eq) {
+		DRM_ERROR("channel eq failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
+			    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig;
+	struct amdgpu_connector *amdgpu_connector;
+	struct amdgpu_connector_atom_dig *dig_connector;
+	struct amdgpu_atombios_dp_link_train_info dp_info;
+	u8 tmp;
+
+	if (!amdgpu_encoder->enc_priv)
+		return;
+	dig = amdgpu_encoder->enc_priv;
+
+	amdgpu_connector = to_amdgpu_connector(connector);
+	if (!amdgpu_connector->con_priv)
+		return;
+	dig_connector = amdgpu_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
+	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
+		return;
+
+	if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
+	    == 1) {
+		if (tmp & DP_TPS3_SUPPORTED)
+			dp_info.tp3_supported = true;
+		else
+			dp_info.tp3_supported = false;
+	} else {
+		dp_info.tp3_supported = false;
+	}
+
+	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
+	dp_info.adev = adev;
+	dp_info.encoder = encoder;
+	dp_info.connector = connector;
+	dp_info.dp_lane_count = dig_connector->dp_lane_count;
+	dp_info.dp_clock = dig_connector->dp_clock;
+	dp_info.aux = &amdgpu_connector->ddc_bus->aux;
+
+	if (amdgpu_atombios_dp_link_train_init(&dp_info))
+		goto done;
+	if (amdgpu_atombios_dp_link_train_cr(&dp_info))
+		goto done;
+	if (amdgpu_atombios_dp_link_train_ce(&dp_info))
+		goto done;
+done:
+	if (amdgpu_atombios_dp_link_train_finish(&dp_info))
+		return;
+}

+ 42 - 0
drivers/gpu/drm/amd/amdgpu/atombios_dp.h

@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ATOMBIOS_DP_H__
+#define __ATOMBIOS_DP_H__
+
+void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector);
+u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector);
+int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector);
+int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
+			       struct drm_connector *connector);
+void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
+				 const struct drm_display_mode *mode);
+int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
+				  struct drm_display_mode *mode);
+bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
+void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
+				    u8 power_state);
+void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
+			    struct drm_connector *connector);
+
+#endif

+ 2066 - 0
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c

@@ -0,0 +1,2066 @@
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "amdgpu_connectors.h"
+#include "atom.h"
+#include "atombios_encoders.h"
+#include "atombios_dp.h"
+#include <linux/backlight.h>
+#include "bif/bif_4_1_d.h"
+
+static u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
+{
+	u8 backlight_level;
+	u32 bios_2_scratch;
+
+	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
+
+	backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+			   ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	return backlight_level;
+}
+
+static void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+					    u8 backlight_level)
+{
+	u32 bios_2_scratch;
+
+	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
+
+	bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+			   ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+	WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
+}
+
+u8
+amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+	struct drm_device *dev = amdgpu_encoder->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return 0;
+
+	return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+}
+
+void
+amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+				     u8 level)
+{
+	struct drm_encoder *encoder = &amdgpu_encoder->base;
+	struct drm_device *dev = amdgpu_encoder->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder_atom_dig *dig;
+
+	if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+	    amdgpu_encoder->enc_priv) {
+		dig = amdgpu_encoder->enc_priv;
+		dig->backlight_level = level;
+		amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, dig->backlight_level);
+
+		switch (amdgpu_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->backlight_level == 0)
+				amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+								       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+			else {
+				amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+								       ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+				amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+								       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 amdgpu_atombios_encoder_backlight_level(struct backlight_device *bd)
+{
+	u8 level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > AMDGPU_MAX_BL_LEVEL)
+		level = AMDGPU_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	return level;
+}
+
+static int amdgpu_atombios_encoder_update_backlight_status(struct backlight_device *bd)
+{
+	struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
+	struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
+
+	amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder,
+					     amdgpu_atombios_encoder_backlight_level(bd));
+
+	return 0;
+}
+
+static int
+amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
+{
+	struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
+	struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
+	struct drm_device *dev = amdgpu_encoder->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+
+	return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+}
+
+static const struct backlight_ops amdgpu_atombios_encoder_backlight_ops = {
+	.get_brightness = amdgpu_atombios_encoder_get_backlight_brightness,
+	.update_status	= amdgpu_atombios_encoder_update_backlight_status,
+};
+
+void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
+				     struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = amdgpu_encoder->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct amdgpu_backlight_privdata *pdata;
+	struct amdgpu_encoder_atom_dig *dig;
+	u8 backlight_level;
+	char bl_name[16];
+
+	/* Mac laptops with multiple GPUs use the gmux driver for backlight
+	 * so don't register a backlight device
+	 */
+	if ((adev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+	    (adev->pdev->device == 0x6741))
+		return;
+
+	if (!amdgpu_encoder->enc_priv)
+		return;
+
+	if (!adev->is_atom_bios)
+		return;
+
+	if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "amdgpu_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
+				       pdata, &amdgpu_atombios_encoder_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = amdgpu_encoder;
+
+	backlight_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
+	dig = amdgpu_encoder->enc_priv;
+	dig->bl_dev = bd;
+
+	bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("amdgpu atom DIG backlight initialized\n");
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+void
+amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
+{
+	struct drm_device *dev = amdgpu_encoder->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+	struct amdgpu_encoder_atom_dig *dig;
+
+	if (!amdgpu_encoder->enc_priv)
+		return;
+
+	if (!adev->is_atom_bios)
+		return;
+
+	if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	dig = amdgpu_encoder->enc_priv;
+	bd = dig->bl_dev;
+	dig->bl_dev = NULL;
+
+	if (bd) {
+		struct amdgpu_legacy_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("amdgpu atom LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
+				     struct drm_connector *drm_connector)
+{
+}
+
+void amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *encoder)
+{
+}
+
+#endif
+
+bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	/* set the active encoder to connector routing */
+	amdgpu_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* hw bug */
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+	/* get the native mode for scaling */
+	if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
+	else if (amdgpu_encoder->rmx_type != RMX_OFF)
+		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
+
+	if ((amdgpu_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+		struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+		amdgpu_atombios_dp_set_link_config(connector, adjusted_mode);
+	}
+
+	return true;
+}
+
+static void
+amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+		break;
+	}
+
+	args.ucAction = action;
+	args.ucDacStandard = ATOM_DAC1_PS2;
+	args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static u8 amdgpu_atombios_encoder_get_bpc(struct drm_encoder *encoder)
+{
+	int bpc = 8;
+
+	if (encoder->crtc) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+		bpc = amdgpu_crtc->bpc;
+	}
+
+	switch (bpc) {
+	case 0:
+		return PANEL_BPC_UNDEFINE;
+	case 6:
+		return PANEL_6BIT_PER_COLOR;
+	case 8:
+	default:
+		return PANEL_8BIT_PER_COLOR;
+	case 10:
+		return PANEL_10BIT_PER_COLOR;
+	case 12:
+		return PANEL_12BIT_PER_COLOR;
+	case 16:
+		return PANEL_16BIT_PER_COLOR;
+	}
+}
+
+union dvo_encoder_control {
+	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
+};
+
+static void
+amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	union dvo_encoder_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+	uint8_t frev, crev;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			/* R4xx, R5xx */
+			args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+			if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+			args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			break;
+		case 2:
+			/* RS600/690/740 */
+			args.dvo.sDVOEncoder.ucAction = action;
+			args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			/* DFP1, CRT1, TV1 depending on the type of port */
+			args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+			if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+			break;
+		case 3:
+			/* R6xx */
+			args.dvo_v3.ucAction = action;
+			args.dvo_v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			args.dvo_v3.ucDVOConfig = 0; /* XXX */
+			break;
+		case 4:
+			/* DCE8 */
+			args.dvo_v4.ucAction = action;
+			args.dvo_v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			args.dvo_v4.ucDVOConfig = 0; /* XXX */
+			args.dvo_v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_connector *connector;
+	struct amdgpu_connector *amdgpu_connector;
+	struct amdgpu_connector_atom_dig *dig_connector;
+
+	/* dp bridges are always DP */
+	if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+		return ATOM_ENCODER_MODE_DP;
+
+	/* DVO is always DVO */
+	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+		return ATOM_ENCODER_MODE_DVO;
+
+	connector = amdgpu_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = amdgpu_get_connector_for_encoder_init(encoder);
+	amdgpu_connector = to_amdgpu_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+		if (amdgpu_audio != 0) {
+			if (amdgpu_connector->use_digital &&
+			    (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE))
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+				 (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (amdgpu_connector->use_digital)
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_CRT;
+		} else if (amdgpu_connector->use_digital) {
+			return ATOM_ENCODER_MODE_DVI;
+		} else {
+			return ATOM_ENCODER_MODE_CRT;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	default:
+		if (amdgpu_audio != 0) {
+			if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+				 (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
+				return ATOM_ENCODER_MODE_HDMI;
+			else
+				return ATOM_ENCODER_MODE_DVI;
+		} else {
+			return ATOM_ENCODER_MODE_DVI;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = amdgpu_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+			return ATOM_ENCODER_MODE_DP;
+		} else if (amdgpu_audio != 0) {
+			if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
+				return ATOM_ENCODER_MODE_HDMI;
+			else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+				 (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
+				return ATOM_ENCODER_MODE_HDMI;
+			else
+				return ATOM_ENCODER_MODE_DVI;
+		} else {
+			return ATOM_ENCODER_MODE_DVI;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+		return ATOM_ENCODER_MODE_DP;
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_VGA:
+		return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		/* fix me */
+		return ATOM_ENCODER_MODE_TV;
+		/*return ATOM_ENCODER_MODE_CV;*/
+		break;
+	}
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 6.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
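
The hardcoded DIG-to-PHY mapping spelled out in the comment above can be read as a small lookup table; the sketch below is purely illustrative (the table and its names are not part of the patch):

	/* illustrative only: DIG encoder index -> UNIPHY block / link, per the comment above */
	static const struct { int uniphy; char link; } dig_to_phy[] = {
		{ 0, 'A' },	/* DIG1 -> UNIPHY0 link A (or A+B) */
		{ 0, 'B' },	/* DIG2 -> UNIPHY0 link B          */
		{ 1, 'A' },	/* DIG3 -> UNIPHY1 link A (or A+B) */
		{ 1, 'B' },	/* DIG4 -> UNIPHY1 link B          */
		{ 2, 'A' },	/* DIG5 -> UNIPHY2 link A (or A+B) */
		{ 2, 'B' },	/* DIG6 -> UNIPHY2 link B          */
	};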
+
+union dig_encoder_control {
+	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
+				   int action, int panel_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+	union dig_encoder_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+	uint8_t frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int hpd_id = AMDGPU_HPD_NONE;
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		struct amdgpu_connector_atom_dig *dig_connector =
+			amdgpu_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = amdgpu_connector->hpd.hpd;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
+		return;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v1.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v1.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v1.ucLaneNum = 8;
+			else
+				args.v1.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+				break;
+			}
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+			break;
+		case 2:
+		case 3:
+			args.v3.ucAction = action;
+			args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v3.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+			args.v3.acConfig.ucDigSel = dig->dig_encoder;
+			args.v3.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v4.ucPanelMode = panel_mode;
+			else
+				args.v4.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
+				if (dp_clock == 540000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+				else if (dp_clock == 324000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
+				else if (dp_clock == 270000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+				else
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
+			}
+			args.v4.acConfig.ucDigSel = dig->dig_encoder;
+			args.v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
+			if (hpd_id == AMDGPU_HPD_NONE)
+				args.v4.ucHPD_ID = 0;
+			else
+				args.v4.ucHPD_ID = hpd_id + 1;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dig_transmitter_control {
+	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+};
+
+void
+amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
+				       uint8_t lane_num, uint8_t lane_set)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector;
+	union dig_transmitter_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	bool is_dp = false;
+	int pll_id = 0;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	int igp_lane_info = 0;
+	int dig_encoder = dig->dig_encoder;
+	int hpd_id = AMDGPU_HPD_NONE;
+
+	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+		connector = amdgpu_get_connector_for_encoder_init(encoder);
+		/* just needed to avoid bailing in the encoder check.  the encoder
+		 * isn't used for init
+		 */
+		dig_encoder = 0;
+	} else
+		connector = amdgpu_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		struct amdgpu_connector_atom_dig *dig_connector =
+			amdgpu_connector->con_priv;
+
+		hpd_id = amdgpu_connector->hpd.hpd;
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	}
+
+	if (encoder->crtc) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+		pll_id = amdgpu_crtc->pll_id;
+	}
+
+	/* no dig encoder assigned */
+	if (dig_encoder == -1)
+		return;
+
+	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)))
+		is_dp = true;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+		break;
+	}
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v1.asMode.ucLaneSel = lane_num;
+				args.v1.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v1.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
+				else
+					args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			}
+
+			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+			if (dig_encoder)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+			if ((adev->flags & AMDGPU_IS_APU) &&
+			    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+				if (is_dp ||
+				    !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
+					if (igp_lane_info & 0x1)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+					else if (igp_lane_info & 0x2)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+					else if (igp_lane_info & 0x4)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+					else if (igp_lane_info & 0x8)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+				} else {
+					if (igp_lane_info & 0x3)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+					else if (igp_lane_info & 0xc)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+				}
+			}
+
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+			if (is_dp)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+			else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+				if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+			}
+			break;
+		case 2:
+			args.v2.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v2.asMode.ucLaneSel = lane_num;
+				args.v2.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v2.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
+				else
+					args.v2.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			}
+
+			args.v2.acConfig.ucEncoderSel = dig_encoder;
+			if (dig->linkb)
+				args.v2.acConfig.ucLinkSel = 1;
+
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v2.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v2.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v2.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp) {
+				args.v2.acConfig.fCoherentMode = 1;
+				args.v2.acConfig.fDPConnector = 1;
+			} else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v2.acConfig.fCoherentMode = 1;
+				if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v2.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 3:
+			args.v3.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v3.asMode.ucLaneSel = lane_num;
+				args.v3.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v3.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
+				else
+					args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v3.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v3.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE4, if there is an external clock, it generates the DP ref clock */
+			if (is_dp && adev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v3.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v3.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v3.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v3.acConfig.fCoherentMode = 1;
+				if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v3.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v4.asMode.ucLaneSel = lane_num;
+				args.v4.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v4.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
+				else
+					args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v4.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v4.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE5 DCPLL usually generates the DP ref clock */
+			if (is_dp) {
+				if (adev->clock.dp_extclk)
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+				else
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+			} else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v4.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v4.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v4.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v4.acConfig.fCoherentMode = 1;
+				if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+					args.v4.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 5:
+			args.v5.ucAction = action;
+			if (is_dp)
+				args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
+			else
+				args.v5.usSymClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+				args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
+				break;
+			}
+			if (is_dp)
+				args.v5.ucLaneNum = dp_lane_count;
+			else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v5.ucLaneNum = 8;
+			else
+				args.v5.ucLaneNum = 4;
+			args.v5.ucConnObjId = connector_object_id;
+			args.v5.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (is_dp && adev->clock.dp_extclk)
+				args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
+			else
+				args.v5.asConfig.ucPhyClkSrcId = pll_id;
+
+			if (is_dp)
+				args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
+			else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v5.asConfig.ucCoherentMode = 1;
+			}
+			if (hpd_id == AMDGPU_HPD_NONE)
+				args.v5.asConfig.ucHPDSel = 0;
+			else
+				args.v5.asConfig.ucHPDSel = hpd_id + 1;
+			args.v5.ucDigEncoderSel = 1 << dig_encoder;
+			args.v5.ucDPLaneSet = lane_set;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
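A note on the clock fields above: amdgpu_encoder->pixel_clock is kept in kHz, while the UNIPHYTransmitterControl parameter blocks (usPixelClock/usSymClock) take clocks in 10 kHz units, which is why every path divides by 10, and why the dual-link paths halve the clock before converting. A minimal sketch of that conversion, with illustrative numbers only:

    #include <stdint.h>

    /* Sketch only: kHz -> the 10 kHz units used by usPixelClock/usSymClock. */
    static uint16_t khz_to_atom_units(uint32_t clock_khz)
    {
    	return (uint16_t)(clock_khz / 10);
    }

    /*
     * khz_to_atom_units(297000)     == 29700  (297 MHz, single link)
     * khz_to_atom_units(297000 / 2) == 14850  (same mode driven dual link)
     */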
+
+bool
+amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
+				     int action)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_device *dev = amdgpu_connector->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	union dig_transmitter_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+	uint8_t frev, crev;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+		goto done;
+
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+		goto done;
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		goto done;
+
+	memset(&args, 0, sizeof(args));
+
+	args.v1.ucAction = action;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* wait for the panel to power up */
+	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+		int i;
+
+		for (i = 0; i < 300; i++) {
+			if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd))
+				return true;
+			mdelay(1);
+		}
+		return false;
+	}
+done:
+	return true;
+}
+
+union external_encoder_control {
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
+					struct drm_encoder *ext_encoder,
+					int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
+	union external_encoder_control args;
+	struct drm_connector *connector;
+	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+	u8 frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	u32 ext_enum = (ext_amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+		connector = amdgpu_get_connector_for_encoder_init(encoder);
+	else
+		connector = amdgpu_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+		struct amdgpu_connector_atom_dig *dig_connector =
+			amdgpu_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		/* no params on frev 1 */
+		break;
+	case 2:
+		switch (crev) {
+		case 1:
+		case 2:
+			args.v1.sDigEncoder.ucAction = action;
+			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			args.v1.sDigEncoder.ucEncoderMode =
+				amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+			} else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v1.sDigEncoder.ucLaneNum = 8;
+			else
+				args.v1.sDigEncoder.ucLaneNum = 4;
+			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode =
+				amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			args.v3.sExtEncoder.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
+			break;
+		default:
+			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
+	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+	struct amdgpu_connector *amdgpu_connector = NULL;
+	struct amdgpu_connector_atom_dig *amdgpu_dig_connector = NULL;
+
+	if (connector) {
+		amdgpu_connector = to_amdgpu_connector(connector);
+		amdgpu_dig_connector = amdgpu_connector->con_priv;
+	}
+
+	if (action == ATOM_ENABLE) {
+		if (!connector)
+			dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+		else
+			dig->panel_mode = amdgpu_atombios_dp_get_panel_mode(encoder, connector);
+
+		/* setup and enable the encoder */
+		amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+		amdgpu_atombios_encoder_setup_dig_encoder(encoder,
+						   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+						   dig->panel_mode);
+		if (ext_encoder)
+			amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
+								EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
+		    connector) {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				amdgpu_atombios_encoder_set_edp_panel_power(connector,
+								     ATOM_TRANSMITTER_ACTION_POWER_ON);
+				amdgpu_dig_connector->edp_on = true;
+			}
+		}
+		/* enable the transmitter */
+		amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+						       ATOM_TRANSMITTER_ACTION_ENABLE,
+						       0, 0);
+		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
+		    connector) {
+			/* DP_SET_POWER_D0 is set in amdgpu_atombios_dp_link_train */
+			amdgpu_atombios_dp_link_train(encoder, connector);
+			amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+		}
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+							       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+		if (ext_encoder)
+			amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
+	} else {
+		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
+		    connector)
+			amdgpu_atombios_encoder_setup_dig_encoder(encoder,
+							   ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+		if (ext_encoder)
+			amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_DISABLE);
+		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+							       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+
+		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
+		    connector)
+			amdgpu_atombios_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
+		/* disable the transmitter */
+		amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+						       ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
+		    connector) {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				amdgpu_atombios_encoder_set_edp_panel_power(connector,
+								     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+				amdgpu_dig_connector->edp_on = false;
+			}
+		}
+	}
+}
+
+void
+amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+		  amdgpu_encoder->encoder_id, mode, amdgpu_encoder->devices,
+		  amdgpu_encoder->active_device);
+	switch (amdgpu_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			amdgpu_atombios_encoder_setup_dig(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			amdgpu_atombios_encoder_setup_dig(encoder, ATOM_DISABLE);
+			break;
+		}
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_DISABLE);
+			break;
+		}
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			amdgpu_atombios_encoder_setup_dac(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			amdgpu_atombios_encoder_setup_dac(encoder, ATOM_DISABLE);
+			break;
+		}
+		break;
+	default:
+		return;
+	}
+}
+
+union crtc_source_param {
+	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+	SELECT_CRTC_SOURCE_PARAMETERS_V3 v3;
+};
+
+void
+amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+	union crtc_source_param args;
+	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+	uint8_t frev, crev;
+	struct amdgpu_encoder_atom_dig *dig;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+		default:
+			args.v1.ucCRTC = amdgpu_crtc->crtc_id;
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+				if (amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+			case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+				break;
+			}
+			break;
+		case 2:
+			args.v2.ucCRTC = amdgpu_crtc->crtc_id;
+			if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+				struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+				else
+					args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+			} else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+			} else {
+				args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+			}
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				dig = amdgpu_encoder->enc_priv;
+				switch (dig->dig_encoder) {
+				case 0:
+					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+					break;
+				case 1:
+					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					break;
+				case 2:
+					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+					break;
+				case 3:
+					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+					break;
+				case 4:
+					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+					break;
+				case 5:
+					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+					break;
+				case 6:
+					args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+					break;
+				}
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+				break;
+			}
+			break;
+		case 3:
+			args.v3.ucCRTC = amdgpu_crtc->crtc_id;
+			if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+				struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+				else
+					args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+			} else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+			} else {
+				args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+			}
+			args.v3.ucDstBpc = amdgpu_atombios_encoder_get_bpc(encoder);
+			switch (amdgpu_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				dig = amdgpu_encoder->enc_priv;
+				switch (dig->dig_encoder) {
+				case 0:
+					args.v3.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+					break;
+				case 1:
+					args.v3.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					break;
+				case 2:
+					args.v3.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+					break;
+				case 3:
+					args.v3.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+					break;
+				case 4:
+					args.v3.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+					break;
+				case 5:
+					args.v3.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+					break;
+				case 6:
+					args.v3.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
+					break;
+				}
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v3.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v3.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v3.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+				break;
+			}
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+/* This only needs to be called once at startup */
+void
+amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+		struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
+
+		switch (amdgpu_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+			amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_INIT,
+							       0, 0);
+			break;
+		}
+
+		if (ext_encoder)
+			amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
+								EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+	}
+}
+
+static bool
+amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
+				 struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+	if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+				       ATOM_DEVICE_CV_SUPPORT |
+				       ATOM_DEVICE_CRT_SUPPORT)) {
+		DAC_LOAD_DETECTION_PS_ALLOCATION args;
+		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+		uint8_t frev, crev;
+
+		memset(&args, 0, sizeof(args));
+
+		if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+			return false;
+
+		args.sDacload.ucMisc = 0;
+
+		if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+		    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+			args.sDacload.ucDacType = ATOM_DAC_A;
+		else
+			args.sDacload.ucDacType = ATOM_DAC_B;
+
+		if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+		else if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+		else if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		} else if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		}
+
+		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		return true;
+	} else
+		return false;
+}
+
+enum drm_connector_status
+amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
+			    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	uint32_t bios_0_scratch;
+
+	if (!amdgpu_atombios_encoder_dac_load_detect(encoder, connector)) {
+		DRM_DEBUG_KMS("detect returned false\n");
+		return connector_status_unknown;
+	}
+
+	bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices);
+	if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+enum drm_connector_status
+amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
+			    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
+	u32 bios_0_scratch;
+
+	if (!ext_encoder)
+		return connector_status_unknown;
+
+	if ((amdgpu_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+		return connector_status_unknown;
+
+	/* load detect on the dp bridge */
+	amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
+						EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+	bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices);
+	if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+void
+amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder)
+{
+	struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
+
+	if (ext_encoder)
+		/* ddc_setup on the dp bridge */
+		amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
+void
+amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_connector *amdgpu_connector =
+	    to_amdgpu_connector(connector);
+	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+	uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+	bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
+	bios_3_scratch = RREG32(mmBIOS_SCRATCH_3);
+	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_0_scratch |= ATOM_S0_LCD1;
+			bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_LCD1;
+			bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP1;
+			bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP1;
+			bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP2;
+			bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP2;
+			bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP3 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP3;
+			bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+		} else {
+			DRM_DEBUG_KMS("DFP3 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP3;
+			bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP4 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP4;
+			bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+		} else {
+			DRM_DEBUG_KMS("DFP4 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP4;
+			bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP5 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP5;
+			bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+		} else {
+			DRM_DEBUG_KMS("DFP5 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP5;
+			bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+		}
+	}
+	if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
+	    (amdgpu_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP6 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP6;
+			bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
+		} else {
+			DRM_DEBUG_KMS("DFP6 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP6;
+			bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
+		}
+	}
+
+	WREG32(mmBIOS_SCRATCH_0, bios_0_scratch);
+	WREG32(mmBIOS_SCRATCH_3, bios_3_scratch);
+	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+}
+
+union lvds_info {
+	struct _ATOM_LVDS_INFO info;
+	struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
+struct amdgpu_encoder_atom_dig *
+amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+	uint16_t data_offset, misc;
+	union lvds_info *lvds_info;
+	uint8_t frev, crev;
+	struct amdgpu_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		lvds_info =
+			(union lvds_info *)(mode_info->atom_context->bios + data_offset);
+		lvds =
+		    kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
+
+		if (!lvds)
+			return NULL;
+
+		lvds->native_mode.clock =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+		lvds->native_mode.hdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+		lvds->native_mode.vdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+		lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+		lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+		lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+		lvds->panel_pwr_delay =
+		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
+		lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
+		/* set crtc values */
+		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+		lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
+
+		encoder->native_mode = lvds->native_mode;
+
+		if (encoder_enum == 2)
+			lvds->linkb = true;
+		else
+			lvds->linkb = false;
+
+		/* parse the lcd record table */
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+			bool bad_record = false;
+			u8 *record;
+
+			if ((frev == 1) && (crev < 2))
+				/* absolute */
+				record = (u8 *)(mode_info->atom_context->bios +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			else
+				/* relative */
+				record = (u8 *)(mode_info->atom_context->bios +
+						data_offset +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			while (*record != ATOM_RECORD_END_TYPE) {
+				switch (*record) {
+				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+					record += sizeof(ATOM_PATCH_RECORD_MODE);
+					break;
+				case LCD_RTS_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_RTS_RECORD);
+					break;
+				case LCD_CAP_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+					break;
+				case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+					fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+					if (fake_edid_record->ucFakeEDIDLength) {
+						struct edid *edid;
+						int edid_size =
+							max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+						edid = kmalloc(edid_size, GFP_KERNEL);
+						if (edid) {
+							memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+							       fake_edid_record->ucFakeEDIDLength);
+
+							if (drm_edid_is_valid(edid)) {
+								adev->mode_info.bios_hardcoded_edid = edid;
+								adev->mode_info.bios_hardcoded_edid_size = edid_size;
+							} else
+								kfree(edid);
+						}
+					}
+					record += fake_edid_record->ucFakeEDIDLength ?
+						fake_edid_record->ucFakeEDIDLength + 2 :
+						sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					break;
+				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+					lvds->native_mode.width_mm = panel_res_record->usHSize;
+					lvds->native_mode.height_mm = panel_res_record->usVSize;
+					record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+					break;
+				default:
+					DRM_ERROR("Bad LCD record %d\n", *record);
+					bad_record = true;
+					break;
+				}
+				if (bad_record)
+					break;
+			}
+		}
+	}
+	return lvds;
+}
+
+struct amdgpu_encoder_atom_dig *
+amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder)
+{
+	int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	struct amdgpu_encoder_atom_dig *dig = kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
+
+	if (!dig)
+		return NULL;
+
+	/* coherent mode by default */
+	dig->coherent_mode = true;
+	dig->dig_encoder = -1;
+
+	if (encoder_enum == 2)
+		dig->linkb = true;
+	else
+		dig->linkb = false;
+
+	return dig;
+}
+
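The amdgpu_atombios_encoder_get_lcd_info() path above rebuilds a drm_display_mode from the ATOM LVDS_Info blanking, sync-offset and sync-width fields. A self-contained sketch of the same horizontal arithmetic, using made-up panel numbers (the vertical timings are derived identically):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* hypothetical panel record values, all in pixels */
    	uint16_t hdisplay = 1366, hblanking = 160, hsync_offset = 70, hsync_width = 32;

    	uint16_t htotal      = hdisplay + hblanking;        /* 1526 */
    	uint16_t hsync_start = hdisplay + hsync_offset;     /* 1436 */
    	uint16_t hsync_end   = hsync_start + hsync_width;   /* 1468 */

    	printf("htotal=%u hsync_start=%u hsync_end=%u\n", htotal, hsync_start, hsync_end);
    	return 0;
    }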

+ 73 - 0
drivers/gpu/drm/amd/amdgpu/atombios_encoders.h

@@ -0,0 +1,73 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ATOMBIOS_ENCODER_H__
+#define __ATOMBIOS_ENCODER_H__
+
+u8
+amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
+void
+amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+				     u8 level);
+void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
+				     struct drm_connector *drm_connector);
+void
+amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder);
+bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder);
+bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode);
+int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder);
+void
+amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
+				   int action, int panel_mode);
+void
+amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
+				       uint8_t lane_num, uint8_t lane_set);
+bool
+amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
+				     int action);
+void
+amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode);
+void
+amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder);
+void
+amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev);
+enum drm_connector_status
+amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
+			    struct drm_connector *connector);
+enum drm_connector_status
+amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
+			    struct drm_connector *connector);
+void
+amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder);
+void
+amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected);
+struct amdgpu_encoder_atom_dig *
+amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder);
+struct amdgpu_encoder_atom_dig *
+amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder);
+
+#endif
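These declarations are the interface the display-IP (DCE) code is expected to call into; the concrete wiring is not part of this hunk, so the snippet below is only an illustrative sketch and the dce_* names are hypothetical: a one-time amdgpu_atombios_encoder_init_dig() during hardware init, and an encoder DPMS hook that forwards to amdgpu_atombios_encoder_dpms().

    #include "amdgpu.h"
    #include "atombios_encoders.h"

    /* Hypothetical consumer, for illustration only. */
    static int dce_hw_init_sketch(struct amdgpu_device *adev)
    {
    	amdgpu_atombios_encoder_init_dig(adev);	/* "only needs to be called once at startup" */
    	return 0;
    }

    static void dce_encoder_dpms_sketch(struct drm_encoder *encoder, int mode)
    {
    	amdgpu_atombios_encoder_dpms(encoder, mode);
    }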

+ 158 - 0
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c

@@ -0,0 +1,158 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+
+#define TARGET_HW_I2C_CLOCK 50
+
+/* these are a limitation of ProcessI2cChannelTransaction not the hw */
+#define ATOM_MAX_HW_I2C_WRITE 3
+#define ATOM_MAX_HW_I2C_READ  255
+
+static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
+				       u8 slave_addr, u8 flags,
+				       u8 *buf, u8 num)
+{
+	struct drm_device *dev = chan->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+	unsigned char *base;
+	u16 out = cpu_to_le16(0);
+	int r = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	mutex_lock(&chan->mutex);
+
+	base = (unsigned char *)adev->mode_info.atom_context->scratch;
+
+	if (flags & HW_I2C_WRITE) {
+		if (num > ATOM_MAX_HW_I2C_WRITE) {
+			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
+			r = -EINVAL;
+			goto done;
+		}
+		if (buf == NULL)
+			args.ucRegIndex = 0;
+		else
+			args.ucRegIndex = buf[0];
+		if (num)
+			num--;
+		if (num)
+			memcpy(&out, &buf[1], num);
+		args.lpI2CDataOut = cpu_to_le16(out);
+	} else {
+		if (num > ATOM_MAX_HW_I2C_READ) {
+			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
+			r = -EINVAL;
+			goto done;
+		}
+		args.ucRegIndex = 0;
+		args.lpI2CDataOut = 0;
+	}
+
+	args.ucFlag = flags;
+	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+	args.ucTransBytes = num;
+	args.ucSlaveAddr = slave_addr << 1;
+	args.ucLineNumber = chan->rec.i2c_id;
+
+	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* error */
+	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
+		DRM_DEBUG_KMS("hw_i2c error\n");
+		r = -EIO;
+		goto done;
+	}
+
+	if (!(flags & HW_I2C_WRITE))
+		amdgpu_atombios_copy_swap(buf, base, num, false);
+
+done:
+	mutex_unlock(&chan->mutex);
+
+	return r;
+}
+
+int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
+		      struct i2c_msg *msgs, int num)
+{
+	struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct i2c_msg *p;
+	int i, remaining, current_count, buffer_offset, max_bytes, ret;
+	u8 flags;
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
+						  p->addr, HW_I2C_WRITE,
+						  NULL, 0);
+		if (ret)
+			return ret;
+		else
+			return num;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
+		/* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
+		if (p->flags & I2C_M_RD) {
+			max_bytes = ATOM_MAX_HW_I2C_READ;
+			flags = HW_I2C_READ;
+		} else {
+			max_bytes = ATOM_MAX_HW_I2C_WRITE;
+			flags = HW_I2C_WRITE;
+		}
+		while (remaining) {
+			if (remaining > max_bytes)
+				current_count = max_bytes;
+			else
+				current_count = remaining;
+			ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
+							  p->addr, flags,
+							  &p->buf[buffer_offset], current_count);
+			if (ret)
+				return ret;
+			remaining -= current_count;
+			buffer_offset += current_count;
+		}
+	}
+
+	return num;
+}
+
+u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
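amdgpu_atombios_i2c_xfer() and amdgpu_atombios_i2c_func() match the prototypes of struct i2c_algorithm's master_xfer and functionality hooks. The actual adapter registration is not part of this hunk, so the struct below is only a sketch of the assumed wiring:

    #include <linux/i2c.h>
    #include "atombios_i2c.h"

    /* Assumed wiring; the real instance lives elsewhere in the driver. */
    static const struct i2c_algorithm amdgpu_atombios_i2c_algo_sketch = {
    	.master_xfer   = amdgpu_atombios_i2c_xfer,
    	.functionality = amdgpu_atombios_i2c_func,
    };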

+ 31 - 0
drivers/gpu/drm/amd/amdgpu/atombios_i2c.h

@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ATOMBIOS_I2C_H__
+#define __ATOMBIOS_I2C_H__
+
+int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
+		      struct i2c_msg *msgs, int num);
+u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+
+#endif

+ 6699 - 0
drivers/gpu/drm/amd/amdgpu/ci_dpm.c

@@ -0,0 +1,6699 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_ucode.h"
+#include "cikd.h"
+#include "amdgpu_dpm.h"
+#include "ci_dpm.h"
+#include "gfx_v7_0.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE               4
+#define VOLTAGE_VID_OFFSET_SCALE1    625
+#define VOLTAGE_VID_OFFSET_SCALE2    100
+
+static const struct ci_pt_defaults defaults_hawaii_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
+	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
+	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
+	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
+	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
+};
+
+static const struct ci_pt_defaults defaults_saturn_xt =
+{
+	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
+	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_saturn_pro =
+{
+	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
+	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
+	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
+};
+
+static const struct ci_pt_config_reg didt_config_ci[] =
+{
+	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+	{ 0xFFFFFFFF }
+};
+
+static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
+{
+	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+#define MC_CG_ARB_FREQ_F0           0x0a
+#define MC_CG_ARB_FREQ_F1           0x0b
+#define MC_CG_ARB_FREQ_F2           0x0c
+#define MC_CG_ARB_FREQ_F3           0x0d
+
+static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+				       u32 arb_freq_src, u32 arb_freq_dest)
+{
+	u32 mc_arb_dram_timing;
+	u32 mc_arb_dram_timing2;
+	u32 burst_time;
+	u32 mc_cg_config;
+
+	switch (arb_freq_src) {
+	case MC_CG_ARB_FREQ_F0:
+		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
+		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
+		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
+			 MC_ARB_BURST_TIME__STATE0__SHIFT;
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
+		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
+		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
+			 MC_ARB_BURST_TIME__STATE1__SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (arb_freq_dest) {
+	case MC_CG_ARB_FREQ_F0:
+		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
+			~MC_ARB_BURST_TIME__STATE0_MASK);
+		break;
+	case MC_CG_ARB_FREQ_F1:
+		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
+			~MC_ARB_BURST_TIME__STATE1_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
+	WREG32(mmMC_CG_CONFIG, mc_cg_config);
+	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
+		~MC_ARB_CG__CG_ARB_REQ_MASK);
+
+	return 0;
+}
+
+static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+	u8 mc_para_index;
+
+	if (memory_clock < 10000)
+		mc_para_index = 0;
+	else if (memory_clock >= 80000)
+		mc_para_index = 0x0f;
+	else
+		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+	return mc_para_index;
+}
+
+static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+	u8 mc_para_index;
+
+	if (strobe_mode) {
+		if (memory_clock < 12500)
+			mc_para_index = 0x00;
+		else if (memory_clock > 47500)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 10000) / 2500);
+	} else {
+		if (memory_clock < 65000)
+			mc_para_index = 0x00;
+		else if (memory_clock > 135000)
+			mc_para_index = 0x0f;
+		else
+			mc_para_index = (u8)((memory_clock - 60000) / 5000);
+	}
+	return mc_para_index;
+}
+
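+/*
+ * If the voltage table has more entries than the SMC state table can
+ * hold, drop the leading entries so only the last max_voltage_steps
+ * entries remain.
+ */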
+static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+						     u32 max_voltage_steps,
+						     struct atom_voltage_table *voltage_table)
+{
+	unsigned int i, diff;
+
+	if (voltage_table->count <= max_voltage_steps)
+		return;
+
+	diff = voltage_table->count - max_voltage_steps;
+
+	for (i = 0; i < max_voltage_steps; i++)
+		voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+	voltage_table->count = max_voltage_steps;
+}
+
+static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
+					 struct atom_voltage_table_entry *voltage_table,
+					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
+static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
+static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
+				       u32 target_tdp);
+static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
+static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+							     PPSMC_Msg msg, u32 parameter);
+static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+
+static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
+{
+	struct ci_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
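+/*
+ * Select the PowerTune defaults for this board by PCI device ID
+ * (Bonaire, Saturn and Hawaii variants) and set the baseline
+ * power-containment capabilities.
+ */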
+static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	switch (adev->pdev->device) {
+	case 0x6649:
+	case 0x6650:
+	case 0x6651:
+	case 0x6658:
+	case 0x665C:
+	case 0x665D:
+	default:
+		pi->powertune_defaults = &defaults_bonaire_xt;
+		break;
+	case 0x6640:
+	case 0x6641:
+	case 0x6646:
+	case 0x6647:
+		pi->powertune_defaults = &defaults_saturn_xt;
+		break;
+	case 0x67B8:
+	case 0x67B0:
+		pi->powertune_defaults = &defaults_hawaii_xt;
+		break;
+	case 0x67BA:
+	case 0x67B1:
+		pi->powertune_defaults = &defaults_hawaii_pro;
+		break;
+	case 0x67A0:
+	case 0x67A1:
+	case 0x67A2:
+	case 0x67A8:
+	case 0x67A9:
+	case 0x67AA:
+	case 0x67B9:
+	case 0x67BE:
+		pi->powertune_defaults = &defaults_bonaire_xt;
+		break;
+	}
+
+	pi->dte_tj_offset = 0;
+
+	pi->caps_power_containment = true;
+	pi->caps_cac = false;
+	pi->caps_sq_ramping = false;
+	pi->caps_db_ramping = false;
+	pi->caps_td_ramping = false;
+	pi->caps_tcp_ramping = false;
+
+	if (pi->caps_power_containment) {
+		pi->caps_cac = true;
+		if (adev->asic_type == CHIP_HAWAII)
+			pi->enable_bapm_feature = false;
+		else
+			pi->enable_bapm_feature = true;
+		pi->enable_tdc_limit_feature = true;
+		pi->enable_pkg_pwr_tracking_feature = true;
+	}
+}
+
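+/*
+ * Convert a VDDC value to the VID encoding used in the SMC powertune
+ * tables; assuming VOLTAGE_SCALE is 4, the formula maps 1550 mV to
+ * VID 0 in 6.25 mV steps.
+ */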
+static u8 ci_convert_to_vid(u16 vddc)
+{
+	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
+}
+
+static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
+	u32 i;
+
+	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
+		return -EINVAL;
+	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
+		return -EINVAL;
+	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
+	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
+		return -EINVAL;
+
+	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
+		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
+			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
+			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
+		} else {
+			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
+			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
+		}
+	}
+	return 0;
+}
+
+static int ci_populate_vddc_vid(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u8 *vid = pi->smc_powertune_table.VddCVid;
+	u32 i;
+
+	if (pi->vddc_voltage_table.count > 8)
+		return -EINVAL;
+
+	for (i = 0; i < pi->vddc_voltage_table.count; i++)
+		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
+
+	return 0;
+}
+
+static int ci_populate_svi_load_line(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+
+	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
+	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
+	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
+	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
+
+	return 0;
+}
+
+static int ci_populate_tdc_limit(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+	u16 tdc_limit;
+
+	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
+	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
+	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+		pt_defaults->tdc_vddc_throttle_release_limit_perc;
+	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
+
+	return 0;
+}
+
+static int ci_populate_dw8(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+	int ret;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
+				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
+				     pi->sram_end);
+	if (ret)
+		return -EINVAL;
+	else
+		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
+
+	return 0;
+}
+
+static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
+	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
+		adev->pm.dpm.fan.fan_output_sensitivity =
+			adev->pm.dpm.fan.default_fan_output_sensitivity;
+
+	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
+		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
+
+	return 0;
+}
+
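+/*
+ * Derive the GNB LPML min/max VIDs from the non-zero entries of the
+ * BAPM hi/lo SIDD VID tables; fails if either bound ends up zero.
+ */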
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+	int i, min, max;
+
+	min = max = hi_vid[0];
+	for (i = 0; i < 8; i++) {
+		if (0 != hi_vid[i]) {
+			if (min > hi_vid[i])
+				min = hi_vid[i];
+			if (max < hi_vid[i])
+				max = hi_vid[i];
+		}
+
+		if (0 != lo_vid[i]) {
+			if (min > lo_vid[i])
+				min = lo_vid[i];
+			if (max < lo_vid[i])
+				max = lo_vid[i];
+		}
+	}
+
+	if ((min == 0) || (max == 0))
+		return -EINVAL;
+	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
+	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
+
+	return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u16 hi_sidd, lo_sidd;
+	struct amdgpu_cac_tdp_table *cac_tdp_table =
+		adev->pm.dpm.dyn_state.cac_tdp_table;
+
+	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
+	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
+
+	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
+	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
+
+	return 0;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
+	struct amdgpu_cac_tdp_table *cac_tdp_table =
+		adev->pm.dpm.dyn_state.cac_tdp_table;
+	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+	int i, j, k;
+	const u16 *def1;
+	const u16 *def2;
+
+	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
+	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
+
+	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
+	dpm_table->GpuTjMax =
+		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
+	dpm_table->GpuTjHyst = 8;
+
+	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
+
+	if (ppm) {
+		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
+		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
+	} else {
+		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
+		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
+	}
+
+	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
+	def1 = pt_defaults->bapmti_r;
+	def2 = pt_defaults->bapmti_rc;
+
+	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+			for (k = 0; k < SMU7_DTE_SINKS; k++) {
+				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
+				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
+				def1++;
+				def2++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int ci_populate_pm_base(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 pm_fuse_table_offset;
+	int ret;
+
+	if (pi->caps_power_containment) {
+		ret = amdgpu_ci_read_smc_sram_dword(adev,
+					     SMU7_FIRMWARE_HEADER_LOCATION +
+					     offsetof(SMU7_Firmware_Header, PmFuseTable),
+					     &pm_fuse_table_offset, pi->sram_end);
+		if (ret)
+			return ret;
+		ret = ci_populate_bapm_vddc_vid_sidd(adev);
+		if (ret)
+			return ret;
+		ret = ci_populate_vddc_vid(adev);
+		if (ret)
+			return ret;
+		ret = ci_populate_svi_load_line(adev);
+		if (ret)
+			return ret;
+		ret = ci_populate_tdc_limit(adev);
+		if (ret)
+			return ret;
+		ret = ci_populate_dw8(adev);
+		if (ret)
+			return ret;
+		ret = ci_populate_fuzzy_fan(adev);
+		if (ret)
+			return ret;
+		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
+		if (ret)
+			return ret;
+		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
+		if (ret)
+			return ret;
+		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
+					   (u8 *)&pi->smc_powertune_table,
+					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
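+/*
+ * Toggle the DIDT_CTRL_EN bit for whichever DIDT blocks (SQ, DB, TD,
+ * TCP) have ramping enabled on this board.
+ */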
+static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 data;
+
+	if (pi->caps_sq_ramping) {
+		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
+		if (enable)
+			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+		else
+			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
+	}
+
+	if (pi->caps_db_ramping) {
+		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
+		if (enable)
+			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+		else
+			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
+	}
+
+	if (pi->caps_td_ramping) {
+		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
+		if (enable)
+			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+		else
+			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
+	}
+
+	if (pi->caps_tcp_ramping) {
+		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
+		if (enable)
+			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+		else
+			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
+	}
+}
+
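+/*
+ * Walk a ci_pt_config_reg list terminated by offset 0xFFFFFFFF: CACHE
+ * entries accumulate field values that are OR'd into the next register
+ * write; other entries do a read-modify-write through the SMC, DIDT or
+ * MMIO register space as indicated by their type.
+ */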
+static int ci_program_pt_config_registers(struct amdgpu_device *adev,
+					  const struct ci_pt_config_reg *cac_config_regs)
+{
+	const struct ci_pt_config_reg *config_regs = cac_config_regs;
+	u32 data;
+	u32 cache = 0;
+
+	if (config_regs == NULL)
+		return -EINVAL;
+
+	while (config_regs->offset != 0xFFFFFFFF) {
+		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
+			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+		} else {
+			switch (config_regs->type) {
+			case CISLANDS_CONFIGREG_SMC_IND:
+				data = RREG32_SMC(config_regs->offset);
+				break;
+			case CISLANDS_CONFIGREG_DIDT_IND:
+				data = RREG32_DIDT(config_regs->offset);
+				break;
+			default:
+				data = RREG32(config_regs->offset);
+				break;
+			}
+
+			data &= ~config_regs->mask;
+			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+			data |= cache;
+
+			switch (config_regs->type) {
+			case CISLANDS_CONFIGREG_SMC_IND:
+				WREG32_SMC(config_regs->offset, data);
+				break;
+			case CISLANDS_CONFIGREG_DIDT_IND:
+				WREG32_DIDT(config_regs->offset, data);
+				break;
+			default:
+				WREG32(config_regs->offset, data);
+				break;
+			}
+			cache = 0;
+		}
+		config_regs++;
+	}
+	return 0;
+}
+
+static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret;
+
+	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
+		gfx_v7_0_enter_rlc_safe_mode(adev);
+
+		if (enable) {
+			ret = ci_program_pt_config_registers(adev, didt_config_ci);
+			if (ret) {
+				gfx_v7_0_exit_rlc_safe_mode(adev);
+				return ret;
+			}
+		}
+
+		ci_do_enable_didt(adev, enable);
+
+		gfx_v7_0_exit_rlc_safe_mode(adev);
+	}
+
+	return 0;
+}
+
+static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (enable) {
+		pi->power_containment_features = 0;
+		if (pi->caps_power_containment) {
+			if (pi->enable_bapm_feature) {
+				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+				if (smc_result != PPSMC_Result_OK)
+					ret = -EINVAL;
+				else
+					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+			}
+
+			if (pi->enable_tdc_limit_feature) {
+				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
+				if (smc_result != PPSMC_Result_OK)
+					ret = -EINVAL;
+				else
+					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
+			}
+
+			if (pi->enable_pkg_pwr_tracking_feature) {
+				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
+				if (smc_result != PPSMC_Result_OK) {
+					ret = -EINVAL;
+				} else {
+					struct amdgpu_cac_tdp_table *cac_tdp_table =
+						adev->pm.dpm.dyn_state.cac_tdp_table;
+					u32 default_pwr_limit =
+						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+
+					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+					ci_set_power_limit(adev, default_pwr_limit);
+				}
+			}
+		}
+	} else {
+		if (pi->caps_power_containment && pi->power_containment_features) {
+			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
+				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
+
+			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
+				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
+			pi->power_containment_features = 0;
+		}
+	}
+
+	return ret;
+}
+
+static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret = 0;
+
+	if (pi->caps_cac) {
+		if (enable) {
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+			if (smc_result != PPSMC_Result_OK) {
+				ret = -EINVAL;
+				pi->cac_enabled = false;
+			} else {
+				pi->cac_enabled = true;
+			}
+		} else if (pi->cac_enabled) {
+			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+			pi->cac_enabled = false;
+		}
+	}
+
+	return ret;
+}
+
+static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
+					    bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result = PPSMC_Result_OK;
+
+	if (pi->thermal_sclk_dpm_enabled) {
+		if (enable)
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
+		else
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
+	}
+
+	if (smc_result == PPSMC_Result_OK)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int ci_power_control_set_level(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_cac_tdp_table *cac_tdp_table =
+		adev->pm.dpm.dyn_state.cac_tdp_table;
+	s32 adjust_percent;
+	s32 target_tdp;
+	int ret = 0;
+	bool adjust_polarity = false; /* ??? */
+
+	if (pi->caps_power_containment) {
+		adjust_percent = adjust_polarity ?
+			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
+		target_tdp = ((100 + adjust_percent) *
+			      (s32)cac_tdp_table->configurable_tdp) / 100;
+
+		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
+	}
+
+	return ret;
+}
+
+static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (pi->uvd_power_gated == gate)
+		return;
+
+	pi->uvd_power_gated = gate;
+
+	ci_update_uvd_dpm(adev, gate);
+}
+
+static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+
+	return vblank_time < switch_limit;
+}
+
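+/*
+ * Adjust the requested power state before programming it: pick VCE
+ * clocks when VCE is active, disable mclk switching when more than one
+ * CRTC is active or vblank is too short, clamp clocks to the AC/DC
+ * limits, and keep the performance levels monotonic.
+ */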
+static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps)
+{
+	struct ci_ps *ps = ci_get_ps(rps);
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_clock_and_voltage_limits *max_limits;
+	bool disable_mclk_switching;
+	u32 sclk, mclk;
+	int i;
+
+	if (rps->vce_active) {
+		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+	} else {
+		rps->evclk = 0;
+		rps->ecclk = 0;
+	}
+
+	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+	    ci_dpm_vblank_too_short(adev))
+		disable_mclk_switching = true;
+	else
+		disable_mclk_switching = false;
+
+	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+		pi->battery_state = true;
+	else
+		pi->battery_state = false;
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (adev->pm.dpm.ac_power == false) {
+		for (i = 0; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].mclk > max_limits->mclk)
+				ps->performance_levels[i].mclk = max_limits->mclk;
+			if (ps->performance_levels[i].sclk > max_limits->sclk)
+				ps->performance_levels[i].sclk = max_limits->sclk;
+		}
+	}
+
+	/* XXX validate the min clocks required for display */
+
+	if (disable_mclk_switching) {
+		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
+		sclk = ps->performance_levels[0].sclk;
+	} else {
+		mclk = ps->performance_levels[0].mclk;
+		sclk = ps->performance_levels[0].sclk;
+	}
+
+	if (rps->vce_active) {
+		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+	}
+
+	ps->performance_levels[0].sclk = sclk;
+	ps->performance_levels[0].mclk = mclk;
+
+	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
+		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
+
+	if (disable_mclk_switching) {
+		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
+			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
+	} else {
+		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
+			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
+	}
+}
+
+static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
+					    int min_temp, int max_temp)
+{
+	int low_temp = 0 * 1000;
+	int high_temp = 255 * 1000;
+	u32 tmp;
+
+	if (low_temp < min_temp)
+		low_temp = min_temp;
+	if (high_temp > max_temp)
+		high_temp = max_temp;
+	if (high_temp < low_temp) {
+		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+		return -EINVAL;
+	}
+
+	tmp = RREG32_SMC(ixCG_THERMAL_INT);
+	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
+		((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
+	WREG32_SMC(ixCG_THERMAL_INT, tmp);
+
+#if 0
+	/* XXX: need to figure out how to handle this properly */
+	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
+	tmp &= DIG_THERM_DPM_MASK;
+	tmp |= DIG_THERM_DPM(high_temp / 1000);
+	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
+#endif
+
+	adev->pm.dpm.thermal.min_temp = low_temp;
+	adev->pm.dpm.thermal.max_temp = high_temp;
+	return 0;
+}
+
+static int ci_thermal_enable_alert(struct amdgpu_device *adev,
+				   bool enable)
+{
+	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+	PPSMC_Result result;
+
+	if (enable) {
+		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
+				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
+		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
+		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	} else {
+		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
+			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
+		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
+		if (result != PPSMC_Result_OK) {
+			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+
+	if (pi->fan_ctrl_is_in_default_mode) {
+		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
+			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+		pi->fan_ctrl_default_mode = tmp;
+		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
+			>> CG_FDO_CTRL2__TMIN__SHIFT;
+		pi->t_min = tmp;
+		pi->fan_ctrl_is_in_default_mode = false;
+	}
+
+	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+
+	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+}
+
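+/*
+ * Build an SMU7_Discrete_FanTable from the fan profile in
+ * adev->pm.dpm.fan (temperature/PWM points, hysteresis, refresh period)
+ * and upload it to the SMC; disable ucode fan control if the table
+ * cannot be built or loaded.
+ */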
+static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+	u32 duty100;
+	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+	u16 fdo_min, slope1, slope2;
+	u32 reference_clock, tmp;
+	int ret;
+	u64 tmp64;
+
+	if (!pi->fan_table_start) {
+		adev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
+		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+
+	if (duty100 == 0) {
+		adev->pm.dpm.fan.ucode_fan_control = false;
+		return 0;
+	}
+
+	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+	do_div(tmp64, 10000);
+	fdo_min = (u16)tmp64;
+
+	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
+	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+
+	fan_table.Slope1 = cpu_to_be16(slope1);
+	fan_table.Slope2 = cpu_to_be16(slope2);
+
+	fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+
+	fan_table.HystUp = cpu_to_be16(1);
+
+	fan_table.HystSlope = cpu_to_be16(1);
+
+	fan_table.TempRespLim = cpu_to_be16(5);
+
+	reference_clock = amdgpu_asic_get_xclk(adev);
+
+	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+					       reference_clock) / 1600);
+
+	fan_table.FdoMax = cpu_to_be16((u16)duty100);
+
+	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
+		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
+	fan_table.TempSrc = (uint8_t)tmp;
+
+	ret = amdgpu_ci_copy_bytes_to_smc(adev,
+					  pi->fan_table_start,
+					  (u8 *)(&fan_table),
+					  sizeof(fan_table),
+					  pi->sram_end);
+
+	if (ret) {
+		DRM_ERROR("Failed to load fan table to the SMC.\n");
+		adev->pm.dpm.fan.ucode_fan_control = false;
+	}
+
+	return 0;
+}
+
+static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result ret;
+
+	if (pi->caps_od_fuzzy_fan_control_support) {
+		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+							       PPSMC_StartFanControl,
+							       FAN_CONTROL_FUZZY);
+		if (ret != PPSMC_Result_OK)
+			return -EINVAL;
+		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+							       PPSMC_MSG_SetFanPwmMax,
+							       adev->pm.dpm.fan.default_max_fan_pwm);
+		if (ret != PPSMC_Result_OK)
+			return -EINVAL;
+	} else {
+		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+							       PPSMC_StartFanControl,
+							       FAN_CONTROL_TABLE);
+		if (ret != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	pi->fan_is_controlled_by_smc = true;
+	return 0;
+}
+
+static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+	PPSMC_Result ret;
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
+	if (ret == PPSMC_Result_OK) {
+		pi->fan_is_controlled_by_smc = false;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
+static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+					u32 *speed)
+{
+	u32 duty, duty100;
+	u64 tmp64;
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
+		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
+		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)duty * 100;
+	do_div(tmp64, duty100);
+	*speed = (u32)tmp64;
+
+	if (*speed > 100)
+		*speed = 100;
+
+	return 0;
+}
+
+static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+					u32 speed)
+{
+	u32 tmp;
+	u32 duty, duty100;
+	u64 tmp64;
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (pi->fan_is_controlled_by_smc)
+		return -EINVAL;
+
+	if (speed > 100)
+		return -EINVAL;
+
+	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
+		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+
+	if (duty100 == 0)
+		return -EINVAL;
+
+	tmp64 = (u64)speed * duty100;
+	do_div(tmp64, 100);
+	duty = (u32)tmp64;
+
+	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+	WREG32_SMC(ixCG_FDO_CTRL0, tmp);
+
+	return 0;
+}
+
+static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+	if (mode) {
+		/* stop auto-manage */
+		if (adev->pm.dpm.fan.ucode_fan_control)
+			ci_fan_ctrl_stop_smc_fan_control(adev);
+		ci_fan_ctrl_set_static_mode(adev, mode);
+	} else {
+		/* restart auto-manage */
+		if (adev->pm.dpm.fan.ucode_fan_control)
+			ci_thermal_start_smc_fan_control(adev);
+		else
+			ci_fan_ctrl_set_default_mode(adev);
+	}
+}
+
+static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+
+	if (pi->fan_is_controlled_by_smc)
+		return 0;
+
+	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+	return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
+}
+
+#if 0
+static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+					 u32 *speed)
+{
+	u32 tach_period;
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (adev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
+		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
+	if (tach_period == 0)
+		return -ENOENT;
+
+	*speed = 60 * xclk * 10000 / tach_period;
+
+	return 0;
+}
+
+static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+					 u32 speed)
+{
+	u32 tach_period, tmp;
+	u32 xclk = amdgpu_asic_get_xclk(adev);
+
+	if (adev->pm.no_fan)
+		return -ENOENT;
+
+	if (adev->pm.fan_pulses_per_revolution == 0)
+		return -ENOENT;
+
+	if ((speed < adev->pm.fan_min_rpm) ||
+	    (speed > adev->pm.fan_max_rpm))
+		return -EINVAL;
+
+	if (adev->pm.dpm.fan.ucode_fan_control)
+		ci_fan_ctrl_stop_smc_fan_control(adev);
+
+	tach_period = 60 * xclk * 10000 / (8 * speed);
+	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+	WREG32_SMC(ixCG_TACH_CTRL, tmp);
+
+	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+	return 0;
+}
+#endif
+
+static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+
+	if (!pi->fan_ctrl_is_in_default_mode) {
+		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+
+		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+		pi->fan_ctrl_is_in_default_mode = true;
+	}
+}
+
+static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+	if (adev->pm.dpm.fan.ucode_fan_control) {
+		ci_fan_ctrl_start_smc_fan_control(adev);
+		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+	}
+}
+
+static void ci_thermal_initialize(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	if (adev->pm.fan_pulses_per_revolution) {
+		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
+			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+		WREG32_SMC(ixCG_TACH_CTRL, tmp);
+	}
+
+	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
+}
+
+static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ci_thermal_initialize(adev);
+	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = ci_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+	if (adev->pm.dpm.fan.ucode_fan_control) {
+		ret = ci_thermal_setup_fan_table(adev);
+		if (ret)
+			return ret;
+		ci_thermal_start_smc_fan_control(adev);
+	}
+
+	return 0;
+}
+
+static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+	if (!adev->pm.no_fan)
+		ci_fan_ctrl_set_default_mode(adev);
+}
+
+#if 0
+static int ci_read_smc_soft_register(struct amdgpu_device *adev,
+				     u16 reg_offset, u32 *value)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	return amdgpu_ci_read_smc_sram_dword(adev,
+				      pi->soft_regs_start + reg_offset,
+				      value, pi->sram_end);
+}
+#endif
+
+static int ci_write_smc_soft_register(struct amdgpu_device *adev,
+				      u16 reg_offset, u32 value)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	return amdgpu_ci_write_smc_sram_dword(adev,
+				       pi->soft_regs_start + reg_offset,
+				       value, pi->sram_end);
+}
+
+static void ci_init_fps_limits(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+
+	if (pi->caps_fps) {
+		u16 tmp;
+
+		tmp = 45;
+		table->FpsHighT = cpu_to_be16(tmp);
+
+		tmp = 30;
+		table->FpsLowT = cpu_to_be16(tmp);
+	}
+}
+
+static int ci_update_sclk_t(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret = 0;
+	u32 low_sclk_interrupt_t = 0;
+
+	if (pi->caps_sclk_throttle_low_notification) {
+		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+		ret = amdgpu_ci_copy_bytes_to_smc(adev,
+					   pi->dpm_table_start +
+					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
+					   (u8 *)&low_sclk_interrupt_t,
+					   sizeof(u32), pi->sram_end);
+	}
+
+	return ret;
+}
+
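+/*
+ * Build tables mapping ATOM virtual leakage voltage IDs to real VDDC
+ * and VDDCI values, using either EVV lookups or the VBIOS leakage
+ * parameters depending on the platform caps.
+ */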
+static void ci_get_leakage_voltages(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u16 leakage_id, virtual_voltage_id;
+	u16 vddc, vddci;
+	int i;
+
+	pi->vddc_leakage.count = 0;
+	pi->vddci_leakage.count = 0;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
+				continue;
+			if (vddc != 0 && vddc != virtual_voltage_id) {
+				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+				pi->vddc_leakage.count++;
+			}
+		}
+	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
+		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
+										     virtual_voltage_id,
+										     leakage_id) == 0) {
+				if (vddc != 0 && vddc != virtual_voltage_id) {
+					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+					pi->vddc_leakage.count++;
+				}
+				if (vddci != 0 && vddci != virtual_voltage_id) {
+					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
+					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
+					pi->vddci_leakage.count++;
+				}
+			}
+		}
+	}
+}
+
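+/*
+ * Enable or disable thermal protection based on which automatic
+ * throttle sources are active; the digital/external event source
+ * selection itself is still stubbed out (see the #if 0 block below).
+ */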
+static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	bool want_thermal_protection;
+	enum amdgpu_dpm_event_src dpm_event_src;
+	u32 tmp;
+
+	switch (sources) {
+	case 0:
+	default:
+		want_thermal_protection = false;
+		break;
+	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+		break;
+	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+		break;
+	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+		want_thermal_protection = true;
+		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+		break;
+	}
+
+	if (want_thermal_protection) {
+#if 0
+		/* XXX: need to figure out how to handle this properly */
+		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
+		tmp &= DPM_EVENT_SRC_MASK;
+		tmp |= DPM_EVENT_SRC(dpm_event_src);
+		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
+#endif
+
+		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+		if (pi->thermal_protection)
+			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+		else
+			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+	} else {
+		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+	}
+}
+
+static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
+					   enum amdgpu_dpm_auto_throttle_src source,
+					   bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (enable) {
+		if (!(pi->active_auto_throttle_sources & (1 << source))) {
+			pi->active_auto_throttle_sources |= 1 << source;
+			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+		}
+	} else {
+		if (pi->active_auto_throttle_sources & (1 << source)) {
+			pi->active_auto_throttle_sources &= ~(1 << source);
+			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+		}
+	}
+}
+
+static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
+{
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
+}
+
+static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+
+	if (!pi->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((!pi->sclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	if ((!pi->mclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	pi->need_update_smu7_dpm_table = 0;
+	return 0;
+}
+
+static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+
+	if (enable) {
+		if (!pi->sclk_dpm_key_disabled) {
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+
+		if (!pi->mclk_dpm_key_disabled) {
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+
+			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
+					~MC_SEQ_CNTL_3__CAC_EN_MASK);
+
+			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
+			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
+			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
+
+			udelay(10);
+
+			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
+			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
+			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
+		}
+	} else {
+		if (!pi->sclk_dpm_key_disabled) {
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+
+		if (!pi->mclk_dpm_key_disabled) {
+			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int ci_start_dpm(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret;
+	u32 tmp;
+
+	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
+	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+
+	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
+	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
+
+	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
+
+	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
+	if (smc_result != PPSMC_Result_OK)
+		return -EINVAL;
+
+	ret = ci_enable_sclk_mclk_dpm(adev, true);
+	if (ret)
+		return ret;
+
+	if (!pi->pcie_dpm_key_disabled) {
+		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+
+	if (!pi->need_update_smu7_dpm_table)
+		return 0;
+
+	if ((!pi->sclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	if ((!pi->mclk_dpm_key_disabled) &&
+	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_stop_dpm(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result smc_result;
+	int ret;
+	u32 tmp;
+
+	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
+	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+
+	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
+	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+	if (!pi->pcie_dpm_key_disabled) {
+		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	ret = ci_enable_sclk_mclk_dpm(adev, false);
+	if (ret)
+		return ret;
+
+	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
+	if (smc_result != PPSMC_Result_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+	u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+
+	if (enable)
+		tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
+	else
+		tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
+	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+}
+
+#if 0
+static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
+					bool ac_power)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_cac_tdp_table *cac_tdp_table =
+		adev->pm.dpm.dyn_state.cac_tdp_table;
+	u32 power_limit;
+
+	if (ac_power)
+		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+	else
+		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
+
+	ci_set_power_limit(adev, power_limit);
+
+	if (pi->caps_automatic_dc_transition) {
+		if (ac_power)
+			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
+		else
+			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
+	}
+
+	return 0;
+}
+#endif
+
+static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						      PPSMC_Msg msg, u32 parameter)
+{
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+	return amdgpu_ci_send_msg_to_smc(adev, msg);
+}
+
+static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
+							PPSMC_Msg msg, u32 *parameter)
+{
+	PPSMC_Result smc_result;
+
+	smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
+
+	if ((smc_result == PPSMC_Result_OK) && parameter)
+		*parameter = RREG32(mmSMC_MSG_ARG_0);
+
+	return smc_result;
+}
+
+static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!pi->sclk_dpm_key_disabled) {
+		PPSMC_Result smc_result =
+			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!pi->mclk_dpm_key_disabled) {
+		PPSMC_Result smc_result =
+			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!pi->pcie_dpm_key_disabled) {
+		PPSMC_Result smc_result =
+			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+		PPSMC_Result smc_result =
+			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
+		if (smc_result != PPSMC_Result_OK)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
+				       u32 target_tdp)
+{
+	PPSMC_Result smc_result =
+		amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+	if (smc_result != PPSMC_Result_OK)
+		return -EINVAL;
+	return 0;
+}
+
+#if 0
+static int ci_set_boot_state(struct amdgpu_device *adev)
+{
+	return ci_enable_sclk_mclk_dpm(adev, false);
+}
+#endif
+
+static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
+{
+	u32 sclk_freq;
+	PPSMC_Result smc_result =
+		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
+						    PPSMC_MSG_API_GetSclkFrequency,
+						    &sclk_freq);
+	if (smc_result != PPSMC_Result_OK)
+		sclk_freq = 0;
+
+	return sclk_freq;
+}
+
+static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
+{
+	u32 mclk_freq;
+	PPSMC_Result smc_result =
+		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
+						    PPSMC_MSG_API_GetMclkFrequency,
+						    &mclk_freq);
+	if (smc_result != PPSMC_Result_OK)
+		mclk_freq = 0;
+
+	return mclk_freq;
+}
+
+static void ci_dpm_start_smc(struct amdgpu_device *adev)
+{
+	int i;
+
+	amdgpu_ci_program_jump_on_start(adev);
+	amdgpu_ci_start_smc_clock(adev);
+	amdgpu_ci_start_smc(adev);
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+			break;
+	}
+}
+
+static void ci_dpm_stop_smc(struct amdgpu_device *adev)
+{
+	amdgpu_ci_reset_smc(adev);
+	amdgpu_ci_stop_smc_clock(adev);
+}
+
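+/*
+ * Read the SMC firmware header and cache the SRAM offsets of the DPM,
+ * soft-register, MC register, fan and ARB tables for later uploads.
+ */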
+static int ci_process_firmware_header(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, DpmTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->dpm_table_start = tmp;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, SoftRegisters),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->soft_regs_start = tmp;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->mc_reg_table_start = tmp;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, FanTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->fan_table_start = tmp;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev,
+				     SMU7_FIRMWARE_HEADER_LOCATION +
+				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	pi->arb_table_start = tmp;
+
+	return 0;
+}
+
+static void ci_read_clock_registers(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	pi->clock_registers.cg_spll_func_cntl =
+		RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
+	pi->clock_registers.cg_spll_func_cntl_2 =
+		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
+	pi->clock_registers.cg_spll_func_cntl_3 =
+		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
+	pi->clock_registers.cg_spll_func_cntl_4 =
+		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
+	pi->clock_registers.cg_spll_spread_spectrum =
+		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
+	pi->clock_registers.cg_spll_spread_spectrum_2 =
+		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
+	pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
+	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
+	pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
+	pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
+	pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
+	pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
+	pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
+	pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
+	pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
+}
+
+static void ci_init_sclk_t(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	pi->low_sclk_interrupt_t = 0;
+}
+
+static void ci_enable_thermal_protection(struct amdgpu_device *adev,
+					 bool enable)
+{
+	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+	if (enable)
+		tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+	else
+		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
+	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+	tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
+
+	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+#if 0
+static int ci_enter_ulp_state(struct amdgpu_device *adev)
+{
+
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+	udelay(25000);
+
+	return 0;
+}
+
+static int ci_exit_ulp_state(struct amdgpu_device *adev)
+{
+	int i;
+
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+	udelay(7000);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(mmSMC_RESP_0) == 1)
+			break;
+		udelay(1000);
+	}
+
+	return 0;
+}
+#endif
+
+static int ci_notify_smc_display_change(struct amdgpu_device *adev,
+					bool has_display)
+{
+	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+	return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
+}
+
+static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
+				      bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (enable) {
+		if (pi->caps_sclk_ds) {
+			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
+				return -EINVAL;
+		} else {
+			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	} else {
+		if (pi->caps_sclk_ds) {
+			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
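+/*
+ * Program the display-gap control registers and the SMC soft registers
+ * with frame and pre-vblank timings derived from the current refresh
+ * rate and vblank time, then tell the SMC whether exactly one display
+ * is active.
+ */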
+static void ci_program_display_gap(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
+	u32 pre_vbi_time_in_us;
+	u32 frame_time_in_us;
+	u32 ref_clock = adev->clock.spll.reference_freq;
+	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
+	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+
+	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
+	if (adev->pm.dpm.new_active_crtc_count > 0)
+		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
+	else
+		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
+	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
+
+	if (refresh_rate == 0)
+		refresh_rate = 60;
+	if (vblank_time == 0xffffffff)
+		vblank_time = 500;
+	frame_time_in_us = 1000000 / refresh_rate;
+	pre_vbi_time_in_us =
+		frame_time_in_us - 200 - vblank_time;
+	tmp = pre_vbi_time_in_us * (ref_clock / 100);
+
+	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
+	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
+	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
+}
+
+static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+
+	if (enable) {
+		if (pi->caps_sclk_ss_support) {
+			tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+			tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
+			WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+		}
+	} else {
+		tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
+		tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
+		WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
+
+		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+		tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
+		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+	}
+}
+
+static void ci_program_sstp(struct amdgpu_device *adev)
+{
+	WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
+	((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
+	 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
+}
+
+static void ci_enable_display_gap(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
+
+	tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
+			CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
+	tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
+		(AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
+
+	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void ci_program_vc(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
+}
+
+static void ci_clear_vc(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
+	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
+	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
+
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
+}
+
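+/*
+ * Wait for the SMC boot sequence to finish, stop the SMC clock and
+ * reset the SMC, then load the SMC microcode into SRAM.
+ */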
+static int ci_upload_firmware(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int i, ret;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
+			break;
+	}
+	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
+
+	amdgpu_ci_stop_smc_clock(adev);
+	amdgpu_ci_reset_smc(adev);
+
+	ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+
+	return ret;
+}
+
+static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
+				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+				     struct atom_voltage_table *voltage_table)
+{
+	u32 i;
+
+	if (voltage_dependency_table == NULL)
+		return -EINVAL;
+
+	voltage_table->mask_low = 0;
+	voltage_table->phase_delay = 0;
+
+	voltage_table->count = voltage_dependency_table->count;
+	for (i = 0; i < voltage_table->count; i++) {
+		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+		voltage_table->entries[i].smio_low = 0;
+	}
+
+	return 0;
+}
+
+static int ci_construct_voltage_tables(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret;
+
+	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+							VOLTAGE_OBJ_GPIO_LUT,
+							&pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		ret = ci_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						&pi->vddc_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
+		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
+							 &pi->vddc_voltage_table);
+
+	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+							VOLTAGE_OBJ_GPIO_LUT,
+							&pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		ret = ci_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						&pi->vddci_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
+		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
+							 &pi->vddci_voltage_table);
+
+	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+							VOLTAGE_OBJ_GPIO_LUT,
+							&pi->mvdd_voltage_table);
+		if (ret)
+			return ret;
+	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		ret = ci_get_svi2_voltage_table(adev,
+						&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+						&pi->mvdd_voltage_table);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
+		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
+							 &pi->mvdd_voltage_table);
+
+	return 0;
+}
+
+static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
+					  struct atom_voltage_table_entry *voltage_table,
+					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
+{
+	int ret;
+
+	ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
+					    &smc_voltage_table->StdVoltageHiSidd,
+					    &smc_voltage_table->StdVoltageLoSidd);
+
+	if (ret) {
+		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
+		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
+	}
+
+	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
+	smc_voltage_table->StdVoltageHiSidd =
+		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
+	smc_voltage_table->StdVoltageLoSidd =
+		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
+}
+
+static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	unsigned int count;
+
+	table->VddcLevelCount = pi->vddc_voltage_table.count;
+	for (count = 0; count < table->VddcLevelCount; count++) {
+		ci_populate_smc_voltage_table(adev,
+					      &pi->vddc_voltage_table.entries[count],
+					      &table->VddcLevel[count]);
+
+		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+			table->VddcLevel[count].Smio |=
+				pi->vddc_voltage_table.entries[count].smio_low;
+		else
+			table->VddcLevel[count].Smio = 0;
+	}
+	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
+
+	return 0;
+}
+
+static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
+				       SMU7_Discrete_DpmTable *table)
+{
+	unsigned int count;
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	table->VddciLevelCount = pi->vddci_voltage_table.count;
+	for (count = 0; count < table->VddciLevelCount; count++) {
+		ci_populate_smc_voltage_table(adev,
+					      &pi->vddci_voltage_table.entries[count],
+					      &table->VddciLevel[count]);
+
+		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+			table->VddciLevel[count].Smio |=
+				pi->vddci_voltage_table.entries[count].smio_low;
+		else
+			table->VddciLevel[count].Smio = 0;
+	}
+	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
+
+	return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	unsigned int count;
+
+	table->MvddLevelCount = pi->mvdd_voltage_table.count;
+	for (count = 0; count < table->MvddLevelCount; count++) {
+		ci_populate_smc_voltage_table(adev,
+					      &pi->mvdd_voltage_table.entries[count],
+					      &table->MvddLevel[count]);
+
+		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+			table->MvddLevel[count].Smio |=
+				pi->mvdd_voltage_table.entries[count].smio_low;
+		else
+			table->MvddLevel[count].Smio = 0;
+	}
+	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
+
+	return 0;
+}
+
+static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
+					  SMU7_Discrete_DpmTable *table)
+{
+	int ret;
+
+	ret = ci_populate_smc_vddc_table(adev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_vddci_table(adev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_mvdd_table(adev, table);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+				  SMU7_Discrete_VoltageLevel *voltage)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 i = 0;
+
+	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
+			if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
+				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
+				break;
+			}
+		}
+
+		if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
+			return -EINVAL;
+	}
+
+	return -EINVAL;
+}
+
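+/*
+ * Look up the leakage-compensated (SIDD) high/low standard voltages for a
+ * VDDC entry in the CAC leakage table, falling back to the raw voltage
+ * when no suitable entry is found.
+ */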
+static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
+					 struct atom_voltage_table_entry *voltage_table,
+					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
+{
+	u16 v_index, idx;
+	bool voltage_found = false;
+	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
+	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
+
+	if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+		return -EINVAL;
+
+	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+		for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+			if (voltage_table->value ==
+			    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+				voltage_found = true;
+				if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+					idx = v_index;
+				else
+					idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+				*std_voltage_lo_sidd =
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+				*std_voltage_hi_sidd =
+					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+				break;
+			}
+		}
+
+		if (!voltage_found) {
+			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+				if (voltage_table->value <=
+				    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+					voltage_found = true;
+					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+						idx = v_index;
+					else
+						idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+					*std_voltage_lo_sidd =
+						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+					*std_voltage_hi_sidd =
+						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+					break;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
+						  const struct amdgpu_phase_shedding_limits_table *limits,
+						  u32 sclk,
+						  u32 *phase_shedding)
+{
+	unsigned int i;
+
+	*phase_shedding = 1;
+
+	for (i = 0; i < limits->count; i++) {
+		if (sclk < limits->entries[i].sclk) {
+			*phase_shedding = i;
+			break;
+		}
+	}
+}
+
+static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
+						  const struct amdgpu_phase_shedding_limits_table *limits,
+						  u32 mclk,
+						  u32 *phase_shedding)
+{
+	unsigned int i;
+
+	*phase_shedding = 1;
+
+	for (i = 0; i < limits->count; i++) {
+		if (mclk < limits->entries[i].mclk) {
+			*phase_shedding = i;
+			break;
+		}
+	}
+}
+
+static int ci_init_arb_table_index(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
+				     &tmp, pi->sram_end);
+	if (ret)
+		return ret;
+
+	tmp &= 0x00FFFFFF;
+	tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+	return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
+				       tmp, pi->sram_end);
+}
+
+static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
+					 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
+					 u32 clock, u32 *voltage)
+{
+	u32 i = 0;
+
+	if (allowed_clock_voltage_table->count == 0)
+		return -EINVAL;
+
+	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+			*voltage = allowed_clock_voltage_table->entries[i].v;
+			return 0;
+		}
+	}
+
+	*voltage = allowed_clock_voltage_table->entries[i-1].v;
+
+	return 0;
+}
+
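+/*
+ * Return the largest deep-sleep divider ID for which sclk / (1 << id)
+ * stays at or above the minimum engine clock (or min_sclk_in_sr, if that
+ * is higher); returns 0 when sclk is already below the minimum.
+ */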
+static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
+					     u32 sclk, u32 min_sclk_in_sr)
+{
+	u32 i;
+	u32 tmp;
+	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
+		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+
+	if (sclk < min)
+		return 0;
+
+	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
+		tmp = sclk / (1 << i);
+		if (tmp >= min || i == 0)
+			break;
+	}
+
+	return (u8)i;
+}
+
+static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ci_reset_to_default(struct amdgpu_device *adev)
+{
+	return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
+
+	if (tmp == MC_CG_ARB_FREQ_F0)
+		return 0;
+
+	return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
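+/*
+ * Work around DRAM timing on devices 0x67B0/0x67B1: when bits 8-11 of
+ * MC_SEQ_MISC0 read back as 0x3 (presumably the memory vendor field),
+ * patch the upper byte of DRAM_TIMING2 for the two memory clock ranges
+ * handled below.
+ */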
+static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
+					const u32 engine_clock,
+					const u32 memory_clock,
+					u32 *dram_timing2)
+{
+	bool patch;
+	u32 tmp, tmp2;
+
+	tmp = RREG32(mmMC_SEQ_MISC0);
+	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+	if (patch &&
+	    ((adev->pdev->device == 0x67B0) ||
+	     (adev->pdev->device == 0x67B1))) {
+		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
+			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
+			*dram_timing2 &= ~0x00ff0000;
+			*dram_timing2 |= tmp2 << 16;
+		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
+			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
+			*dram_timing2 &= ~0x00ff0000;
+			*dram_timing2 |= tmp2 << 16;
+		}
+	}
+}
+
+static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
+						u32 sclk,
+						u32 mclk,
+						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+	u32 dram_timing;
+	u32 dram_timing2;
+	u32 burst_time;
+
+	amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
+
+	dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
+	dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
+	burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
+
+	ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
+
+	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
+	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
+	arb_regs->McArbBurstTime = (u8)burst_time;
+
+	return 0;
+}
+
+static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	SMU7_Discrete_MCArbDramTimingTable arb_regs;
+	u32 i, j;
+	int ret = 0;
+
+	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
+	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
+		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
+			ret = ci_populate_memory_timing_parameters(adev,
+								   pi->dpm_table.sclk_table.dpm_levels[i].value,
+								   pi->dpm_table.mclk_table.dpm_levels[j].value,
+								   &arb_regs.entries[i][j]);
+			if (ret)
+				break;
+		}
+	}
+
+	if (ret == 0)
+		ret = amdgpu_ci_copy_bytes_to_smc(adev,
+					   pi->arb_table_start,
+					   (u8 *)&arb_regs,
+					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
+					   pi->sram_end);
+
+	return ret;
+}
+
+static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (pi->need_update_smu7_dpm_table == 0)
+		return 0;
+
+	return ci_do_program_memory_timing_parameters(adev);
+}
+
+static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
+					  struct amdgpu_ps *amdgpu_boot_state)
+{
+	struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 level = 0;
+
+	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
+		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
+		    boot_state->performance_levels[0].sclk) {
+			pi->smc_state_table.GraphicsBootLevel = level;
+			break;
+		}
+	}
+
+	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
+		if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
+		    boot_state->performance_levels[0].mclk) {
+			pi->smc_state_table.MemoryBootLevel = level;
+			break;
+		}
+	}
+}
+
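+/*
+ * Build a bitmask with bit i set for every enabled level i of the given
+ * DPM table.
+ */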
+static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
+{
+	u32 i;
+	u32 mask_value = 0;
+
+	for (i = dpm_table->count; i > 0; i--) {
+		mask_value = mask_value << 1;
+		if (dpm_table->dpm_levels[i-1].enabled)
+			mask_value |= 0x1;
+		else
+			mask_value &= 0xFFFFFFFE;
+	}
+
+	return mask_value;
+}
+
+static void ci_populate_smc_link_level(struct amdgpu_device *adev,
+				       SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	u32 i;
+
+	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
+		table->LinkLevel[i].PcieGenSpeed =
+			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
+		table->LinkLevel[i].PcieLaneCount =
+			amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+		table->LinkLevel[i].EnabledForActivity = 1;
+		table->LinkLevel[i].DownT = cpu_to_be32(5);
+		table->LinkLevel[i].UpT = cpu_to_be32(30);
+	}
+
+	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
+	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+}
+
+static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
+				     SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->UvdLevelCount =
+		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
+
+	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].VclkFrequency =
+			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
+		table->UvdLevel[count].DclkFrequency =
+			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
+		table->UvdLevel[count].MinVddc =
+			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+		table->UvdLevel[count].MinVddcPhases = 1;
+
+		ret = amdgpu_atombios_get_clock_dividers(adev,
+							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+							 table->UvdLevel[count].VclkFrequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
+
+		ret = amdgpu_atombios_get_clock_dividers(adev,
+							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+							 table->UvdLevel[count].DclkFrequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
+
+		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
+		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
+		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
+	}
+
+	return ret;
+}
+
+static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
+				     SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->VceLevelCount =
+		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
+
+	for (count = 0; count < table->VceLevelCount; count++) {
+		table->VceLevel[count].Frequency =
+			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
+		table->VceLevel[count].MinVoltage =
+			(u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+		table->VceLevel[count].MinPhases = 1;
+
+		ret = amdgpu_atombios_get_clock_dividers(adev,
+							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+							 table->VceLevel[count].Frequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->VceLevel[count].Divider = (u8)dividers.post_divider;
+
+		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
+		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
+	}
+
+	return ret;
+}
+
+static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
+				     SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->AcpLevelCount = (u8)
+		(adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
+
+	for (count = 0; count < table->AcpLevelCount; count++) {
+		table->AcpLevel[count].Frequency =
+			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
+		table->AcpLevel[count].MinVoltage =
+			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
+		table->AcpLevel[count].MinPhases = 1;
+
+		ret = amdgpu_atombios_get_clock_dividers(adev,
+							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+							 table->AcpLevel[count].Frequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
+
+		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
+		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
+	}
+
+	return ret;
+}
+
+static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	u32 count;
+	struct atom_clock_dividers dividers;
+	int ret = -EINVAL;
+
+	table->SamuLevelCount =
+		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
+
+	for (count = 0; count < table->SamuLevelCount; count++) {
+		table->SamuLevel[count].Frequency =
+			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
+		table->SamuLevel[count].MinVoltage =
+			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+		table->SamuLevel[count].MinPhases = 1;
+
+		ret = amdgpu_atombios_get_clock_dividers(adev,
+							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+							 table->SamuLevel[count].Frequency, false, &dividers);
+		if (ret)
+			return ret;
+
+		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
+
+		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
+		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
+	}
+
+	return ret;
+}
+
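+/*
+ * Compute the MPLL register images for a memory clock: feedback and post
+ * dividers, VCO mode, DLL speed and MRDCK pad power-down bits, plus
+ * optional memory spread spectrum, and store them in the SMC memory
+ * level.
+ */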
+static int ci_calculate_mclk_params(struct amdgpu_device *adev,
+				    u32 memory_clock,
+				    SMU7_Discrete_MemoryLevel *mclk,
+				    bool strobe_mode,
+				    bool dll_state_on)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32  dll_cntl = pi->clock_registers.dll_cntl;
+	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
+	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
+	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
+	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
+	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
+	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
+	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
+	struct atom_mpll_param mpll_param;
+	int ret;
+
+	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+	if (ret)
+		return ret;
+
+	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
+	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
+
+	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
+			MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
+	mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
+		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
+		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
+
+	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
+	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
+				MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
+		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
+				(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
+	}
+
+	if (pi->caps_mclk_ss_support) {
+		struct amdgpu_atom_ss ss;
+		u32 freq_nom;
+		u32 tmp;
+		u32 reference_clock = adev->clock.mpll.reference_freq;
+
+		if (mpll_param.qdr == 1)
+			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
+		else
+			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
+
+		tmp = (freq_nom / reference_clock);
+		tmp = tmp * tmp;
+		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+			u32 clks = reference_clock * 5 / ss.rate;
+			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
+			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
+
+			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
+			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
+		}
+	}
+
+	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
+	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
+
+	if (dll_state_on)
+		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
+			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
+	else
+		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
+			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
+
+	mclk->MclkFrequency = memory_clock;
+	mclk->MpllFuncCntl = mpll_func_cntl;
+	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+	mclk->DllCntl = dll_cntl;
+	mclk->MpllSs1 = mpll_ss1;
+	mclk->MpllSs2 = mpll_ss2;
+
+	return 0;
+}
+
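+/*
+ * Fill one SMC memory DPM level: minimum VDDC/VDDCI/MVDD from the mclk
+ * dependency tables, stutter/strobe/EDC enables based on the configured
+ * thresholds and display state, the MPLL parameters, and finally
+ * byte-swap everything for the (big-endian) SMC.
+ */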
+static int ci_populate_single_memory_level(struct amdgpu_device *adev,
+					   u32 memory_clock,
+					   SMU7_Discrete_MemoryLevel *memory_level)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret;
+	bool dll_state_on;
+
+	if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
+		ret = ci_get_dependency_volt_by_clk(adev,
+						    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+						    memory_clock, &memory_level->MinVddc);
+		if (ret)
+			return ret;
+	}
+
+	if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
+		ret = ci_get_dependency_volt_by_clk(adev,
+						    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+						    memory_clock, &memory_level->MinVddci);
+		if (ret)
+			return ret;
+	}
+
+	if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
+		ret = ci_get_dependency_volt_by_clk(adev,
+						    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+						    memory_clock, &memory_level->MinMvdd);
+		if (ret)
+			return ret;
+	}
+
+	memory_level->MinVddcPhases = 1;
+
+	if (pi->vddc_phase_shed_control)
+		ci_populate_phase_value_based_on_mclk(adev,
+						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						      memory_clock,
+						      &memory_level->MinVddcPhases);
+
+	memory_level->EnabledForThrottle = 1;
+	memory_level->EnabledForActivity = 1;
+	memory_level->UpH = 0;
+	memory_level->DownH = 100;
+	memory_level->VoltageDownH = 0;
+	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
+
+	memory_level->StutterEnable = false;
+	memory_level->StrobeEnable = false;
+	memory_level->EdcReadEnable = false;
+	memory_level->EdcWriteEnable = false;
+	memory_level->RttEnable = false;
+
+	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	if (pi->mclk_stutter_mode_threshold &&
+	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
+	    (pi->uvd_enabled == false) &&
+	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
+	    (adev->pm.dpm.new_active_crtc_count <= 2))
+		memory_level->StutterEnable = true;
+
+	if (pi->mclk_strobe_mode_threshold &&
+	    (memory_clock <= pi->mclk_strobe_mode_threshold))
+		memory_level->StrobeEnable = 1;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+		memory_level->StrobeRatio =
+			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
+		if (pi->mclk_edc_enable_threshold &&
+		    (memory_clock > pi->mclk_edc_enable_threshold))
+			memory_level->EdcReadEnable = true;
+
+		if (pi->mclk_edc_wr_enable_threshold &&
+		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
+			memory_level->EdcWriteEnable = true;
+
+		if (memory_level->StrobeEnable) {
+			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
+			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
+				dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+			else
+				dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+		} else {
+			dll_state_on = pi->dll_default_on;
+		}
+	} else {
+		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
+		dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+	}
+
+	ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+	if (ret)
+		return ret;
+
+	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
+	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
+	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+
+	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
+	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
+	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
+	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
+	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
+	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
+	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
+	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
+	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
+	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
+	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
+
+	return 0;
+}
+
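+/*
+ * Set up the low-power ACPI sclk/mclk levels: run the engine clock from
+ * the reference clock with the SPLL powered down and held in reset, and
+ * put the memory MRDCK pads into reset.
+ */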
+static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
+				      SMU7_Discrete_DpmTable *table)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct atom_clock_dividers dividers;
+	SMU7_Discrete_VoltageLevel voltage_level;
+	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
+	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
+	u32 dll_cntl = pi->clock_registers.dll_cntl;
+	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+	int ret;
+
+	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+	if (pi->acpi_vddc)
+		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
+	else
+		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
+
+	table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
+
+	ret = amdgpu_atombios_get_clock_dividers(adev,
+						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+						 table->ACPILevel.SclkFrequency, false, &dividers);
+	if (ret)
+		return ret;
+
+	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
+	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+	table->ACPILevel.DeepSleepDivId = 0;
+
+	spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
+	spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+
+	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+	spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
+
+	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
+	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
+	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
+	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+	table->ACPILevel.CcPwrDynRm = 0;
+	table->ACPILevel.CcPwrDynRm1 = 0;
+
+	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
+	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
+	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
+	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
+	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
+	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
+	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
+	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
+	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
+	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
+	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
+
+	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		if (pi->acpi_vddci)
+			table->MemoryACPILevel.MinVddci =
+				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
+		else
+			table->MemoryACPILevel.MinVddci =
+				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
+	}
+
+	if (ci_populate_mvdd_value(adev, 0, &voltage_level))
+		table->MemoryACPILevel.MinMvdd = 0;
+	else
+		table->MemoryACPILevel.MinMvdd =
+			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
+
+	mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
+		MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
+	mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
+			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
+
+	dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
+
+	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
+	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
+	table->MemoryACPILevel.MpllAdFuncCntl =
+		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
+	table->MemoryACPILevel.MpllDqFuncCntl =
+		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
+	table->MemoryACPILevel.MpllFuncCntl =
+		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
+	table->MemoryACPILevel.MpllFuncCntl_1 =
+		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
+	table->MemoryACPILevel.MpllFuncCntl_2 =
+		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
+	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
+	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
+
+	table->MemoryACPILevel.EnabledForThrottle = 0;
+	table->MemoryACPILevel.EnabledForActivity = 0;
+	table->MemoryACPILevel.UpH = 0;
+	table->MemoryACPILevel.DownH = 100;
+	table->MemoryACPILevel.VoltageDownH = 0;
+	table->MemoryACPILevel.ActivityLevel =
+		cpu_to_be16((u16)pi->mclk_activity_target);
+
+	table->MemoryACPILevel.StutterEnable = false;
+	table->MemoryACPILevel.StrobeEnable = false;
+	table->MemoryACPILevel.EdcReadEnable = false;
+	table->MemoryACPILevel.EdcWriteEnable = false;
+	table->MemoryACPILevel.RttEnable = false;
+
+	return 0;
+}
+
+static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ulv_parm *ulv = &pi->ulv;
+
+	if (ulv->supported) {
+		if (enable)
+			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		else
+			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ci_populate_ulv_level(struct amdgpu_device *adev,
+				 SMU7_Discrete_Ulv *state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
+
+	state->CcPwrDynRm = 0;
+	state->CcPwrDynRm1 = 0;
+
+	if (ulv_voltage == 0) {
+		pi->ulv.supported = false;
+		return 0;
+	}
+
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+			state->VddcOffset = 0;
+		else
+			state->VddcOffset =
+				adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
+	} else {
+		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+			state->VddcOffsetVid = 0;
+		else
+			state->VddcOffsetVid = (u8)
+				((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
+				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+	}
+	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
+
+	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
+	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
+	state->VddcOffset = cpu_to_be16(state->VddcOffset);
+
+	return 0;
+}
+
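+/*
+ * Compute the SPLL register images for an engine clock: feedback divider
+ * with dithering enabled, optional engine spread spectrum, and the
+ * post-divider (DID), and store them in the SMC graphics level.
+ */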
+static int ci_calculate_sclk_params(struct amdgpu_device *adev,
+				    u32 engine_clock,
+				    SMU7_Discrete_GraphicsLevel *sclk)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct atom_clock_dividers dividers;
+	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
+	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+	u32 reference_clock = adev->clock.spll.reference_freq;
+	u32 reference_divider;
+	u32 fbdiv;
+	int ret;
+
+	ret = amdgpu_atombios_get_clock_dividers(adev,
+						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+						 engine_clock, false, &dividers);
+	if (ret)
+		return ret;
+
+	reference_divider = 1 + dividers.ref_div;
+	fbdiv = dividers.fb_div & 0x3FFFFFF;
+
+	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+	spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
+	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
+
+	if (pi->caps_sclk_ss_support) {
+		struct amdgpu_atom_ss ss;
+		u32 vco_freq = engine_clock * dividers.post_div;
+
+		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+			cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+			cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
+			cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
+
+			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
+			cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
+		}
+	}
+
+	sclk->SclkFrequency = engine_clock;
+	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
+	sclk->SclkDid = (u8)dividers.post_divider;
+
+	return 0;
+}
+
+static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
+					    u32 engine_clock,
+					    u16 sclk_activity_level_t,
+					    SMU7_Discrete_GraphicsLevel *graphic_level)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret;
+
+	ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
+	if (ret)
+		return ret;
+
+	ret = ci_get_dependency_volt_by_clk(adev,
+					    &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+					    engine_clock, &graphic_level->MinVddc);
+	if (ret)
+		return ret;
+
+	graphic_level->SclkFrequency = engine_clock;
+
+	graphic_level->Flags =  0;
+	graphic_level->MinVddcPhases = 1;
+
+	if (pi->vddc_phase_shed_control)
+		ci_populate_phase_value_based_on_sclk(adev,
+						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+						      engine_clock,
+						      &graphic_level->MinVddcPhases);
+
+	graphic_level->ActivityLevel = sclk_activity_level_t;
+
+	graphic_level->CcPwrDynRm = 0;
+	graphic_level->CcPwrDynRm1 = 0;
+	graphic_level->EnabledForThrottle = 1;
+	graphic_level->UpH = 0;
+	graphic_level->DownH = 0;
+	graphic_level->VoltageDownH = 0;
+	graphic_level->PowerThrottle = 0;
+
+	if (pi->caps_sclk_ds)
+		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
+										   engine_clock,
+										   CISLAND_MINIMUM_ENGINE_CLOCK);
+
+	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
+	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
+	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
+	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
+	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
+	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
+	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
+	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
+	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
+	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
+	graphic_level->EnabledForActivity = 1;
+
+	return 0;
+}
+
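+/*
+ * Build every sclk DPM level from the default sclk table, give only the
+ * highest level the high display watermark, and upload the whole level
+ * array to SMC SRAM.
+ */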
+static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	u32 level_array_address = pi->dpm_table_start +
+		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
+		SMU7_MAX_LEVELS_GRAPHICS;
+	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
+	u32 i, ret;
+
+	memset(levels, 0, level_array_size);
+
+	for (i = 0; i < dpm_table->sclk_table.count; i++) {
+		ret = ci_populate_single_graphic_level(adev,
+						       dpm_table->sclk_table.dpm_levels[i].value,
+						       (u16)pi->activity_target[i],
+						       &pi->smc_state_table.GraphicsLevel[i]);
+		if (ret)
+			return ret;
+		if (i > 1)
+			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+		if (i == (dpm_table->sclk_table.count - 1))
+			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+				PPSMC_DISPLAY_WATERMARK_HIGH;
+	}
+
+	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
+				   (u8 *)levels, level_array_size,
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ci_populate_ulv_state(struct amdgpu_device *adev,
+				 SMU7_Discrete_Ulv *ulv_level)
+{
+	return ci_populate_ulv_level(adev, ulv_level);
+}
+
+static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	u32 level_array_address = pi->dpm_table_start +
+		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
+		SMU7_MAX_LEVELS_MEMORY;
+	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
+	u32 i, ret;
+
+	memset(levels, 0, level_array_size);
+
+	for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
+			return -EINVAL;
+		ret = ci_populate_single_memory_level(adev,
+						      dpm_table->mclk_table.dpm_levels[i].value,
+						      &pi->smc_state_table.MemoryLevel[i]);
+		if (ret)
+			return ret;
+	}
+
+	if ((dpm_table->mclk_table.count >= 2) &&
+	    ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
+		pi->smc_state_table.MemoryLevel[1].MinVddc =
+			pi->smc_state_table.MemoryLevel[0].MinVddc;
+		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
+			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
+	}
+
+	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
+
+	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
+	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
+		PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
+				   (u8 *)levels, level_array_size,
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
+				      struct ci_single_dpm_table* dpm_table,
+				      u32 count)
+{
+	u32 i;
+
+	dpm_table->count = count;
+	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
+		dpm_table->dpm_levels[i].enabled = false;
+}
+
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
+				      u32 index, u32 pcie_gen, u32 pcie_lanes)
+{
+	dpm_table->dpm_levels[index].value = pcie_gen;
+	dpm_table->dpm_levels[index].param1 = pcie_lanes;
+	dpm_table->dpm_levels[index].enabled = true;
+}
+
+static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
+		return -EINVAL;
+
+	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
+		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
+		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
+	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
+		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
+		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
+	}
+
+	ci_reset_single_dpm_table(adev,
+				  &pi->dpm_table.pcie_speed_table,
+				  SMU7_MAX_LEVELS_LINK);
+
+	if (adev->asic_type == CHIP_BONAIRE)
+		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+					  pi->pcie_gen_powersaving.min,
+					  pi->pcie_lane_powersaving.max);
+	else
+		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+					  pi->pcie_gen_powersaving.min,
+					  pi->pcie_lane_powersaving.min);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
+				  pi->pcie_gen_performance.min,
+				  pi->pcie_lane_performance.min);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
+				  pi->pcie_gen_powersaving.min,
+				  pi->pcie_lane_powersaving.max);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
+				  pi->pcie_gen_performance.min,
+				  pi->pcie_lane_performance.max);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
+				  pi->pcie_gen_powersaving.max,
+				  pi->pcie_lane_powersaving.max);
+	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
+				  pi->pcie_gen_performance.max,
+				  pi->pcie_lane_performance.max);
+
+	pi->dpm_table.pcie_speed_table.count = 6;
+
+	return 0;
+}
+
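+/*
+ * Reset all DPM tables and rebuild them from the VBIOS dependency tables:
+ * sclk and mclk levels (skipping duplicate clocks), VDDC/VDDCI/MVDD
+ * levels, and the default PCIe table.
+ */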
+static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+	struct amdgpu_cac_leakage_table *std_voltage_table =
+		&adev->pm.dpm.dyn_state.cac_leakage_table;
+	u32 i;
+
+	if (allowed_sclk_vddc_table == NULL)
+		return -EINVAL;
+	if (allowed_sclk_vddc_table->count < 1)
+		return -EINVAL;
+	if (allowed_mclk_table == NULL)
+		return -EINVAL;
+	if (allowed_mclk_table->count < 1)
+		return -EINVAL;
+
+	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
+
+	ci_reset_single_dpm_table(adev,
+				  &pi->dpm_table.sclk_table,
+				  SMU7_MAX_LEVELS_GRAPHICS);
+	ci_reset_single_dpm_table(adev,
+				  &pi->dpm_table.mclk_table,
+				  SMU7_MAX_LEVELS_MEMORY);
+	ci_reset_single_dpm_table(adev,
+				  &pi->dpm_table.vddc_table,
+				  SMU7_MAX_LEVELS_VDDC);
+	ci_reset_single_dpm_table(adev,
+				  &pi->dpm_table.vddci_table,
+				  SMU7_MAX_LEVELS_VDDCI);
+	ci_reset_single_dpm_table(adev,
+				  &pi->dpm_table.mvdd_table,
+				  SMU7_MAX_LEVELS_MVDD);
+
+	pi->dpm_table.sclk_table.count = 0;
+	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+		if ((i == 0) ||
+		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
+		     allowed_sclk_vddc_table->entries[i].clk)) {
+			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
+				allowed_sclk_vddc_table->entries[i].clk;
+			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
+				(i == 0) ? true : false;
+			pi->dpm_table.sclk_table.count++;
+		}
+	}
+
+	pi->dpm_table.mclk_table.count = 0;
+	for (i = 0; i < allowed_mclk_table->count; i++) {
+		if ((i == 0) ||
+		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
+		     allowed_mclk_table->entries[i].clk)) {
+			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
+				allowed_mclk_table->entries[i].clk;
+			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
+				(i == 0) ? true : false;
+			pi->dpm_table.mclk_table.count++;
+		}
+	}
+
+	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+		pi->dpm_table.vddc_table.dpm_levels[i].value =
+			allowed_sclk_vddc_table->entries[i].v;
+		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
+			std_voltage_table->entries[i].leakage;
+		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
+	}
+	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
+
+	allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+	if (allowed_mclk_table) {
+		for (i = 0; i < allowed_mclk_table->count; i++) {
+			pi->dpm_table.vddci_table.dpm_levels[i].value =
+				allowed_mclk_table->entries[i].v;
+			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
+		}
+		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+	}
+
+	allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
+	if (allowed_mclk_table) {
+		for (i = 0; i < allowed_mclk_table->count; i++) {
+			pi->dpm_table.mvdd_table.dpm_levels[i].value =
+				allowed_mclk_table->entries[i].v;
+			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
+		}
+		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+	}
+
+	ci_setup_default_pcie_tables(adev);
+
+	return 0;
+}
+
+static int ci_find_boot_level(struct ci_single_dpm_table *table,
+			      u32 value, u32 *boot_level)
+{
+	u32 i;
+	int ret = -EINVAL;
+
+	for (i = 0; i < table->count; i++) {
+		if (value == table->dpm_levels[i].value) {
+			*boot_level = i;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
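+/*
+ * Build and upload the main SMC DPM table: default DPM tables, voltage,
+ * graphics, memory, link, ACPI, UVD, VCE, ACP and SAMU levels, boot
+ * levels and global intervals/limits, byte-swapped and copied to SMC
+ * SRAM (excluding the trailing PID controller structures).
+ */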
+static int ci_init_smc_table(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ulv_parm *ulv = &pi->ulv;
+	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+	int ret;
+
+	ret = ci_setup_default_dpm_tables(adev);
+	if (ret)
+		return ret;
+
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
+		ci_populate_smc_voltage_tables(adev, table);
+
+	ci_init_fps_limits(adev);
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+	if (ulv->supported) {
+		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
+		if (ret)
+			return ret;
+		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+	}
+
+	ret = ci_populate_all_graphic_levels(adev);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_all_memory_levels(adev);
+	if (ret)
+		return ret;
+
+	ci_populate_smc_link_level(adev, table);
+
+	ret = ci_populate_smc_acpi_level(adev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_vce_level(adev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_acp_level(adev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_samu_level(adev, table);
+	if (ret)
+		return ret;
+
+	ret = ci_do_program_memory_timing_parameters(adev);
+	if (ret)
+		return ret;
+
+	ret = ci_populate_smc_uvd_level(adev, table);
+	if (ret)
+		return ret;
+
+	table->UvdBootLevel  = 0;
+	table->VceBootLevel  = 0;
+	table->AcpBootLevel  = 0;
+	table->SamuBootLevel  = 0;
+	table->GraphicsBootLevel  = 0;
+	table->MemoryBootLevel  = 0;
+
+	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
+				 pi->vbios_boot_state.sclk_bootup_value,
+				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
+
+	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
+				 pi->vbios_boot_state.mclk_bootup_value,
+				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
+
+	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
+	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
+	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
+
+	ci_populate_smc_initial_state(adev, amdgpu_boot_state);
+
+	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
+	if (ret)
+		return ret;
+
+	table->UVDInterval = 1;
+	table->VCEInterval = 1;
+	table->ACPInterval = 1;
+	table->SAMUInterval = 1;
+	table->GraphicsVoltageChangeEnable = 1;
+	table->GraphicsThermThrottleEnable = 1;
+	table->GraphicsInterval = 1;
+	table->VoltageInterval = 1;
+	table->ThermalInterval = 1;
+	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
+					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
+					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+	table->MemoryVoltageChangeEnable = 1;
+	table->MemoryInterval = 1;
+	table->VoltageResponseTime = 0;
+	table->VddcVddciDelta = 4000;
+	table->PhaseResponseTime = 0;
+	table->MemoryThermThrottleEnable = 1;
+	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
+	table->PCIeGenInterval = 1;
+	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
+		table->SVI2Enable  = 1;
+	else
+		table->SVI2Enable  = 0;
+
+	table->ThermGpio = 17;
+	table->SclkStepSize = 0x4000;
+
+	table->SystemFlags = cpu_to_be32(table->SystemFlags);
+	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
+	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
+	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
+	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
+	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
+	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
+	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
+	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
+	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
+	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
+	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
+	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
+	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
+
+	ret = amdgpu_ci_copy_bytes_to_smc(adev,
+				   pi->dpm_table_start +
+				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+				   (u8 *)&table->SystemFlags,
+				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+				   pi->sram_end);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
+				      struct ci_single_dpm_table *dpm_table,
+				      u32 low_limit, u32 high_limit)
+{
+	u32 i;
+
+	for (i = 0; i < dpm_table->count; i++) {
+		if ((dpm_table->dpm_levels[i].value < low_limit) ||
+		    (dpm_table->dpm_levels[i].value > high_limit))
+			dpm_table->dpm_levels[i].enabled = false;
+		else
+			dpm_table->dpm_levels[i].enabled = true;
+	}
+}
+
+static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
+				    u32 speed_low, u32 lanes_low,
+				    u32 speed_high, u32 lanes_high)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+	u32 i, j;
+
+	for (i = 0; i < pcie_table->count; i++) {
+		if ((pcie_table->dpm_levels[i].value < speed_low) ||
+		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
+		    (pcie_table->dpm_levels[i].value > speed_high) ||
+		    (pcie_table->dpm_levels[i].param1 > lanes_high))
+			pcie_table->dpm_levels[i].enabled = false;
+		else
+			pcie_table->dpm_levels[i].enabled = true;
+	}
+
+	for (i = 0; i < pcie_table->count; i++) {
+		if (pcie_table->dpm_levels[i].enabled) {
+			for (j = i + 1; j < pcie_table->count; j++) {
+				if (pcie_table->dpm_levels[j].enabled) {
+					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
+					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
+						pcie_table->dpm_levels[j].enabled = false;
+				}
+			}
+		}
+	}
+}
+
+static int ci_trim_dpm_states(struct amdgpu_device *adev,
+			      struct amdgpu_ps *amdgpu_state)
+{
+	struct ci_ps *state = ci_get_ps(amdgpu_state);
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 high_limit_count;
+
+	if (state->performance_level_count < 1)
+		return -EINVAL;
+
+	if (state->performance_level_count == 1)
+		high_limit_count = 0;
+	else
+		high_limit_count = 1;
+
+	ci_trim_single_dpm_states(adev,
+				  &pi->dpm_table.sclk_table,
+				  state->performance_levels[0].sclk,
+				  state->performance_levels[high_limit_count].sclk);
+
+	ci_trim_single_dpm_states(adev,
+				  &pi->dpm_table.mclk_table,
+				  state->performance_levels[0].mclk,
+				  state->performance_levels[high_limit_count].mclk);
+
+	ci_trim_pcie_dpm_states(adev,
+				state->performance_levels[0].pcie_gen,
+				state->performance_levels[0].pcie_lane,
+				state->performance_levels[high_limit_count].pcie_gen,
+				state->performance_levels[high_limit_count].pcie_lane);
+
+	return 0;
+}
+
+static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
+{
+	struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
+	struct amdgpu_clock_voltage_dependency_table *vddc_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 requested_voltage = 0;
+	u32 i;
+
+	if (disp_voltage_table == NULL)
+		return -EINVAL;
+	if (!disp_voltage_table->count)
+		return -EINVAL;
+
+	for (i = 0; i < disp_voltage_table->count; i++) {
+		if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
+			requested_voltage = disp_voltage_table->entries[i].v;
+	}
+
+	for (i = 0; i < vddc_table->count; i++) {
+		if (requested_voltage <= vddc_table->entries[i].v) {
+			requested_voltage = vddc_table->entries[i].v;
+			return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+								  PPSMC_MSG_VddC_Request,
+								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
+				0 : -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	PPSMC_Result result;
+
+	ci_apply_disp_minimum_voltage_request(adev);
+
+	if (!pi->sclk_dpm_key_disabled) {
+		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
+								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			if (result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+	if (!pi->mclk_dpm_key_disabled) {
+		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
+								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			if (result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+
+#if 0
+	if (!pi->pcie_dpm_key_disabled) {
+		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
+								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			if (result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+	}
+#endif
+
+	return 0;
+}
+
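+/* Determine which DPM tables need to be rebuilt for the new power state. */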
+static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
+						   struct amdgpu_ps *amdgpu_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ps *state = ci_get_ps(amdgpu_state);
+	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+	u32 i;
+
+	pi->need_update_smu7_dpm_table = 0;
+
+	for (i = 0; i < sclk_table->count; i++) {
+		if (sclk == sclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= sclk_table->count) {
+		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	} else {
+		/* XXX check display min clock requirements; as written this
+		 * comparison is always false, so DPMTABLE_UPDATE_SCLK is never
+		 * set on this path.
+		 */
+		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
+			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+	}
+
+	for (i = 0; i < mclk_table->count; i++) {
+		if (mclk == mclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= mclk_table->count)
+		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+	if (adev->pm.dpm.current_active_crtc_count !=
+	    adev->pm.dpm.new_active_crtc_count)
+		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+}
+
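+/* Re-populate and re-upload the graphics/memory level tables flagged above. */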
+static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
+						       struct amdgpu_ps *amdgpu_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ps *state = ci_get_ps(amdgpu_state);
+	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+	struct ci_dpm_table *dpm_table = &pi->dpm_table;
+	int ret;
+
+	if (!pi->need_update_smu7_dpm_table)
+		return 0;
+
+	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
+		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
+
+	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
+
+	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+		ret = ci_populate_all_graphic_levels(adev);
+		if (ret)
+			return ret;
+	}
+
+	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+		ret = ci_populate_all_memory_levels(adev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
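+/* Build the UVD DPM level enable mask within the current voltage limit and
+ * enable/disable UVD DPM; the lowest mclk level is masked off while UVD runs.
+ */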
+static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct amdgpu_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_uvd_dpm)
+					break;
+			}
+		}
+
+		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+						  PPSMC_MSG_UVDDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+		if (pi->last_mclk_dpm_enable_mask & 0x1) {
+			pi->uvd_enabled = true;
+			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
+							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+		}
+	} else {
+		if (pi->last_mclk_dpm_enable_mask & 0x1) {
+			pi->uvd_enabled = false;
+			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
+			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
+							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+		}
+	}
+
+	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct amdgpu_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+		for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_vce_dpm)
+					break;
+			}
+		}
+
+		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+						  PPSMC_MSG_VCEDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
+	}
+
+	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+#if 0
+static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct amdgpu_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
+		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_samu_dpm)
+					break;
+			}
+		}
+
+		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
+	}
+	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+
+static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	const struct amdgpu_clock_and_voltage_limits *max_limits;
+	int i;
+
+	if (adev->pm.dpm.ac_power)
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	else
+		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+	if (enable) {
+		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
+		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
+
+				if (!pi->caps_acp_dpm)
+					break;
+			}
+		}
+
+		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+						  PPSMC_MSG_ACPDPM_SetEnabledMask,
+						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
+	}
+
+	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
+				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
+		0 : -EINVAL;
+}
+#endif
+
+static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+
+	if (!gate) {
+		if (pi->caps_uvd_dpm ||
+		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
+			pi->smc_state_table.UvdBootLevel = 0;
+		else
+			pi->smc_state_table.UvdBootLevel =
+				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
+
+		tmp = RREG32_SMC(ixDPM_TABLE_475);
+		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
+		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
+		WREG32_SMC(ixDPM_TABLE_475, tmp);
+	}
+
+	return ci_enable_uvd_dpm(adev, !gate);
+}
+
+static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
+{
+	u8 i;
+	u32 min_evclk = 30000; /* ??? */
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	for (i = 0; i < table->count; i++) {
+		if (table->entries[i].evclk >= min_evclk)
+			return i;
+	}
+
+	return table->count - 1;
+}
+
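+/* Gate or ungate the VCE clocks on evclk changes and update VCE DPM. */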
+static int ci_update_vce_dpm(struct amdgpu_device *adev,
+			     struct amdgpu_ps *amdgpu_new_state,
+			     struct amdgpu_ps *amdgpu_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret = 0;
+	u32 tmp;
+
+	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
+		if (amdgpu_new_state->evclk) {
+			/* turn the clocks on when encoding */
+			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_CG_STATE_UNGATE);
+			if (ret)
+				return ret;
+
+			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
+			tmp = RREG32_SMC(ixDPM_TABLE_475);
+			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
+			tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
+			WREG32_SMC(ixDPM_TABLE_475, tmp);
+
+			ret = ci_enable_vce_dpm(adev, true);
+		} else {
+			/* turn the clocks off when not encoding */
+			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							    AMD_CG_STATE_GATE);
+			if (ret)
+				return ret;
+
+			ret = ci_enable_vce_dpm(adev, false);
+		}
+	}
+	return ret;
+}
+
+#if 0
+static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
+{
+	return ci_enable_samu_dpm(adev, gate);
+}
+
+static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp;
+
+	if (!gate) {
+		pi->smc_state_table.AcpBootLevel = 0;
+
+		tmp = RREG32_SMC(ixDPM_TABLE_475);
+		tmp &= ~AcpBootLevel_MASK;
+		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
+		WREG32_SMC(ixDPM_TABLE_475, tmp);
+	}
+
+	return ci_enable_acp_dpm(adev, !gate);
+}
+#endif
+
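+/* Trim the DPM tables for the new state and recompute the level enable masks. */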
+static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
+					     struct amdgpu_ps *amdgpu_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret;
+
+	ret = ci_trim_dpm_states(adev, amdgpu_state);
+	if (ret)
+		return ret;
+
+	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
+	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
+	pi->last_mclk_dpm_enable_mask =
+		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+	if (pi->uvd_enabled) {
+		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
+			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+	}
+	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
+
+	return 0;
+}
+
+static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
+				       u32 level_mask)
+{
+	u32 level = 0;
+
+	while ((level_mask & (1 << level)) == 0)
+		level++;
+
+	return level;
+}
+
+
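+/* Force the highest or lowest enabled sclk/mclk/pcie level, or return to
+ * automatic selection, waiting for the SMC to report the new index.
+ */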
+static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+					  enum amdgpu_dpm_forced_level level)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 tmp, levels, i;
+	int ret;
+
+	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_pcie(adev, level);
+				if (ret)
+					return ret;
+				for (i = 0; i < adev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
+					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
+					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->sclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_sclk(adev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < adev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->mclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_mclk(adev, levels);
+				if (ret)
+					return ret;
+				for (i = 0; i < adev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
+					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			levels = 0;
+			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+			while (tmp >>= 1)
+				levels++;
+			if (levels) {
+				ret = ci_dpm_force_state_pcie(adev, level);
+				if (ret)
+					return ret;
+				for (i = 0; i < adev->usec_timeout; i++) {
+					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
+					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
+					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
+					if (tmp == levels)
+						break;
+					udelay(1);
+				}
+			}
+		}
+	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+		if ((!pi->sclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(adev,
+							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			ret = ci_dpm_force_state_sclk(adev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < adev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+		if ((!pi->mclk_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(adev,
+							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+			ret = ci_dpm_force_state_mclk(adev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < adev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
+				TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+		if ((!pi->pcie_dpm_key_disabled) &&
+		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+			levels = ci_get_lowest_enabled_level(adev,
+							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+			ret = ci_dpm_force_state_pcie(adev, levels);
+			if (ret)
+				return ret;
+			for (i = 0; i < adev->usec_timeout; i++) {
+				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
+				TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
+				TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
+				if (tmp == levels)
+					break;
+				udelay(1);
+			}
+		}
+	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+		if (!pi->pcie_dpm_key_disabled) {
+			PPSMC_Result smc_result;
+
+			smc_result = amdgpu_ci_send_msg_to_smc(adev,
+							       PPSMC_MSG_PCIeDPM_UnForceLevel);
+			if (smc_result != PPSMC_Result_OK)
+				return -EINVAL;
+		}
+		ret = ci_upload_dpm_level_enable_mask(adev);
+		if (ret)
+			return ret;
+	}
+
+	adev->pm.dpm.forced_level = level;
+
+	return 0;
+}
+
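+/* Append derived EMRS/MRS/MRS1 (and auto-command) entries to the MC register
+ * table based on the MC_SEQ_MISC1 and MC_SEQ_RESERVE_M values.
+ */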
+static int ci_set_mc_special_registers(struct amdgpu_device *adev,
+				       struct ci_mc_reg_table *table)
+{
+	u8 i, j, k;
+	u32 temp_reg;
+
+	for (i = 0, j = table->last; i < table->last; i++) {
+		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+			return -EINVAL;
+		switch (table->mc_reg_address[i].s1) {
+		case mmMC_SEQ_MISC1:
+			temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+			}
+			j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+			}
+			j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+
+			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+				for (k = 0; k < table->num_entries; k++) {
+					table->mc_reg_table_entry[k].mc_data[j] =
+						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+				}
+				j++;
+				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+					return -EINVAL;
+			}
+			break;
+		case mmMC_SEQ_RESERVE_M:
+			temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
+			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+			for (k = 0; k < table->num_entries; k++) {
+				table->mc_reg_table_entry[k].mc_data[j] =
+					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+			}
+			j++;
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			break;
+		default:
+			break;
+		}
+
+	}
+
+	table->last = j;
+
+	return 0;
+}
+
+static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+	bool result = true;
+
+	switch (in_reg) {
+	case mmMC_SEQ_RAS_TIMING:
+		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
+		break;
+	case mmMC_SEQ_DLL_STBY:
+		*out_reg = mmMC_SEQ_DLL_STBY_LP;
+		break;
+	case mmMC_SEQ_G5PDX_CMD0:
+		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+		break;
+	case mmMC_SEQ_G5PDX_CMD1:
+		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+		break;
+	case mmMC_SEQ_G5PDX_CTRL:
+		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+		break;
+	case mmMC_SEQ_CAS_TIMING:
+		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
+		break;
+	case mmMC_SEQ_MISC_TIMING:
+		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
+		break;
+	case mmMC_SEQ_MISC_TIMING2:
+		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+		break;
+	case mmMC_SEQ_PMG_DVS_CMD:
+		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+		break;
+	case mmMC_SEQ_PMG_DVS_CTL:
+		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+		break;
+	case mmMC_SEQ_RD_CTL_D0:
+		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+		break;
+	case mmMC_SEQ_RD_CTL_D1:
+		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+		break;
+	case mmMC_SEQ_WR_CTL_D0:
+		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+		break;
+	case mmMC_SEQ_WR_CTL_D1:
+		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+		break;
+	case mmMC_PMG_CMD_EMRS:
+		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+		break;
+	case mmMC_PMG_CMD_MRS:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+		break;
+	case mmMC_PMG_CMD_MRS1:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+		break;
+	case mmMC_SEQ_PMG_TIMING:
+		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
+		break;
+	case mmMC_PMG_CMD_MRS2:
+		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+		break;
+	case mmMC_SEQ_WR_CTL_2:
+		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
+		break;
+	default:
+		result = false;
+		break;
+	}
+
+	return result;
+}
+
+static void ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+	u8 i, j;
+
+	for (i = 0; i < table->last; i++) {
+		for (j = 1; j < table->num_entries; j++) {
+			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+			    table->mc_reg_table_entry[j].mc_data[i]) {
+				table->valid_flag |= 1 << i;
+				break;
+			}
+		}
+	}
+}
+
+static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+	u32 i;
+	u16 address;
+
+	for (i = 0; i < table->last; i++) {
+		table->mc_reg_address[i].s0 =
+			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+			address : table->mc_reg_address[i].s1;
+	}
+}
+
+static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
+				      struct ci_mc_reg_table *ci_table)
+{
+	u8 i, j;
+
+	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+		return -EINVAL;
+	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+		return -EINVAL;
+
+	for (i = 0; i < table->last; i++)
+		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+	ci_table->last = table->last;
+
+	for (i = 0; i < table->num_entries; i++) {
+		ci_table->mc_reg_table_entry[i].mclk_max =
+			table->mc_reg_table_entry[i].mclk_max;
+		for (j = 0; j < table->last; j++)
+			ci_table->mc_reg_table_entry[i].mc_data[j] =
+				table->mc_reg_table_entry[i].mc_data[j];
+	}
+	ci_table->num_entries = table->num_entries;
+
+	return 0;
+}
+
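+/* Apply board-specific MC sequencer fix-ups for devices 0x67B0/0x67B1 at the
+ * 125000/137500 mclk table entries.
+ */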
+static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
+				       struct ci_mc_reg_table *table)
+{
+	u8 i, k;
+	u32 tmp;
+	bool patch;
+
+	tmp = RREG32(mmMC_SEQ_MISC0);
+	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+
+	if (patch &&
+	    ((adev->pdev->device == 0x67B0) ||
+	     (adev->pdev->device == 0x67B1))) {
+		for (i = 0; i < table->last; i++) {
+			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			switch (table->mc_reg_address[i].s1) {
+			case mmMC_SEQ_MISC1:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
+							0x00000007;
+				}
+				break;
+			case mmMC_SEQ_WR_CTL_D0:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+							0x0000D0DD;
+				}
+				break;
+			case mmMC_SEQ_WR_CTL_D1:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
+							0x0000D0DD;
+				}
+				break;
+			case mmMC_SEQ_WR_CTL_2:
+				for (k = 0; k < table->num_entries; k++) {
+					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
+					    (table->mc_reg_table_entry[k].mclk_max == 137500))
+						table->mc_reg_table_entry[k].mc_data[i] = 0;
+				}
+				break;
+			case mmMC_SEQ_CAS_TIMING:
+				for (k = 0; k < table->num_entries; k++) {
+					if (table->mc_reg_table_entry[k].mclk_max == 125000)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+							0x000C0140;
+					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
+							0x000C0150;
+				}
+				break;
+			case mmMC_SEQ_MISC_TIMING:
+				for (k = 0; k < table->num_entries; k++) {
+					if (table->mc_reg_table_entry[k].mclk_max == 125000)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+							0x00000030;
+					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
+						table->mc_reg_table_entry[k].mc_data[i] =
+							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
+							0x00000035;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+
+		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
+		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
+		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
+		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
+		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
+	}
+
+	return 0;
+}
+
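+/* Mirror the MC sequencer registers into their _LP shadows and convert the
+ * VBIOS MC register table into the driver/SMC format.
+ */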
+static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct atom_mc_reg_table *table;
+	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
+	u8 module_index = ci_get_memory_module_index(adev);
+	int ret;
+
+	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
+	WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
+	WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
+	WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
+	WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
+	WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
+	WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
+	WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
+	WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
+	WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
+	WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
+	WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
+	WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
+	WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
+	WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
+	WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
+	WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
+	WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
+	WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
+	WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
+
+	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+	if (ret)
+		goto init_mc_done;
+
+	ci_set_s0_mc_reg_index(ci_table);
+
+	ret = ci_register_patching_mc_seq(adev, ci_table);
+	if (ret)
+		goto init_mc_done;
+
+	ret = ci_set_mc_special_registers(adev, ci_table);
+	if (ret)
+		goto init_mc_done;
+
+	ci_set_valid_flag(ci_table);
+
+init_mc_done:
+	kfree(table);
+
+	return ret;
+}
+
+static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
+					SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 i, j;
+
+	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
+		if (pi->mc_reg_table.valid_flag & (1 << j)) {
+			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+				return -EINVAL;
+			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
+			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
+			i++;
+		}
+	}
+
+	mc_reg_table->last = (u8)i;
+
+	return 0;
+}
+
+static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
+				    SMU7_Discrete_MCRegisterSet *data,
+				    u32 num_entries, u32 valid_flag)
+{
+	u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+		if (valid_flag & (1 << j)) {
+			data->value[i] = cpu_to_be32(entry->mc_data[j]);
+			i++;
+		}
+	}
+}
+
+static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+						 const u32 memory_clock,
+						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 i = 0;
+
+	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
+		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+			break;
+	}
+
+	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
+		--i;
+
+	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
+				mc_reg_table_data, pi->mc_reg_table.last,
+				pi->mc_reg_table.valid_flag);
+}
+
+static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+					   SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	u32 i;
+
+	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
+		ci_convert_mc_reg_table_entry_to_smc(adev,
+						     pi->dpm_table.mclk_table.dpm_levels[i].value,
+						     &mc_reg_table->data[i]);
+}
+
+static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	int ret;
+
+	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+	ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
+	if (ret)
+		return ret;
+	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
+
+	return amdgpu_ci_copy_bytes_to_smc(adev,
+				    pi->mc_reg_table_start,
+				    (u8 *)&pi->smc_mc_reg_table,
+				    sizeof(SMU7_Discrete_MCRegisters),
+				    pi->sram_end);
+}
+
+static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+		return 0;
+
+	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
+
+	return amdgpu_ci_copy_bytes_to_smc(adev,
+				    pi->mc_reg_table_start +
+				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
+				    (u8 *)&pi->smc_mc_reg_table.data[0],
+				    sizeof(SMU7_Discrete_MCRegisterSet) *
+				    pi->dpm_table.mclk_table.count,
+				    pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
+
+	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
+	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
+}
+
+static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
+						      struct amdgpu_ps *amdgpu_state)
+{
+	struct ci_ps *state = ci_get_ps(amdgpu_state);
+	int i;
+	u16 pcie_speed, max_speed = 0;
+
+	for (i = 0; i < state->performance_level_count; i++) {
+		pcie_speed = state->performance_levels[i].pcie_gen;
+		if (max_speed < pcie_speed)
+			max_speed = pcie_speed;
+	}
+
+	return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+	u32 speed_cntl = 0;
+
+	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
+		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+	speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+
+	return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
+{
+	u32 link_width = 0;
+
+	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
+	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+
+	switch (link_width) {
+	case 1:
+		return 1;
+	case 2:
+		return 2;
+	case 3:
+		return 4;
+	case 4:
+		return 8;
+	case 0:
+	case 6:
+	default:
+		return 16;
+	}
+}
+
+static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+							     struct amdgpu_ps *amdgpu_new_state,
+							     struct amdgpu_ps *amdgpu_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed =
+		ci_get_maximum_link_speed(adev, amdgpu_new_state);
+	enum amdgpu_pcie_gen current_link_speed;
+
+	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
+	else
+		current_link_speed = pi->force_pcie_gen;
+
+	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+	pi->pspp_notify_required = false;
+	if (target_link_speed > current_link_speed) {
+		switch (target_link_speed) {
+#ifdef CONFIG_ACPI
+		case AMDGPU_PCIE_GEN3:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+				break;
+			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+			if (current_link_speed == AMDGPU_PCIE_GEN2)
+				break;
+		case AMDGPU_PCIE_GEN2:
+			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+				break;
+#endif
+		default:
+			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
+			break;
+		}
+	} else {
+		if (target_link_speed < current_link_speed)
+			pi->pspp_notify_required = true;
+	}
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+							   struct amdgpu_ps *amdgpu_new_state,
+							   struct amdgpu_ps *amdgpu_current_state)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	enum amdgpu_pcie_gen target_link_speed =
+		ci_get_maximum_link_speed(adev, amdgpu_new_state);
+	u8 request;
+
+	if (pi->pspp_notify_required) {
+		if (target_link_speed == AMDGPU_PCIE_GEN3)
+			request = PCIE_PERF_REQ_PECI_GEN3;
+		else if (target_link_speed == AMDGPU_PCIE_GEN2)
+			request = PCIE_PERF_REQ_PECI_GEN2;
+		else
+			request = PCIE_PERF_REQ_PECI_GEN1;
+
+		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+		    (ci_get_current_pcie_speed(adev) > 0))
+			return;
+
+#ifdef CONFIG_ACPI
+		amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+	}
+}
+
+static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
+		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+
+	if (allowed_sclk_vddc_table == NULL)
+		return -EINVAL;
+	if (allowed_sclk_vddc_table->count < 1)
+		return -EINVAL;
+	if (allowed_mclk_vddc_table == NULL)
+		return -EINVAL;
+	if (allowed_mclk_vddc_table->count < 1)
+		return -EINVAL;
+	if (allowed_mclk_vddci_table == NULL)
+		return -EINVAL;
+	if (allowed_mclk_vddci_table->count < 1)
+		return -EINVAL;
+
+	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
+	pi->max_vddc_in_pp_table =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
+	pi->max_vddci_in_pp_table =
+		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
+		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
+		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+	return 0;
+}
+
+static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
+	u32 leakage_index;
+
+	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+		if (leakage_table->leakage_id[leakage_index] == *vddc) {
+			*vddc = leakage_table->actual_voltage[leakage_index];
+			break;
+		}
+	}
+}
+
+static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
+	u32 leakage_index;
+
+	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+		if (leakage_table->leakage_id[leakage_index] == *vddci) {
+			*vddci = leakage_table->actual_voltage[leakage_index];
+			break;
+		}
+	}
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
+								      struct amdgpu_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
+								       struct amdgpu_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
+									  struct amdgpu_vce_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
+									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
+	}
+}
+
+static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
+								   struct amdgpu_phase_shedding_limits_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
+	}
+}
+
+static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
+							    struct amdgpu_clock_and_voltage_limits *table)
+{
+	if (table) {
+		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
+		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
+	}
+}
+
+static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
+							 struct amdgpu_cac_leakage_table *table)
+{
+	u32 i;
+
+	if (table) {
+		for (i = 0; i < table->count; i++)
+			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
+	}
+}
+
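+/* Replace leakage voltage IDs with real voltages in all dependency tables. */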
+static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
+{
+
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
+	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
+								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
+	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
+	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
+								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
+	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
+							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
+	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
+							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
+							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
+	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
+						     &adev->pm.dpm.dyn_state.cac_leakage_table);
+
+}
+
+static void ci_update_current_ps(struct amdgpu_device *adev,
+				 struct amdgpu_ps *rps)
+{
+	struct ci_ps *new_ps = ci_get_ps(rps);
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	pi->current_rps = *rps;
+	pi->current_ps = *new_ps;
+	pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void ci_update_requested_ps(struct amdgpu_device *adev,
+				   struct amdgpu_ps *rps)
+{
+	struct ci_ps *new_ps = ci_get_ps(rps);
+	struct ci_power_info *pi = ci_get_pi(adev);
+
+	pi->requested_rps = *rps;
+	pi->requested_ps = *new_ps;
+	pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+	struct amdgpu_ps *new_ps = &requested_ps;
+
+	ci_update_requested_ps(adev, new_ps);
+
+	ci_apply_state_adjust_rules(adev, &pi->requested_rps);
+
+	return 0;
+}
+
+static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *new_ps = &pi->requested_rps;
+
+	ci_update_current_ps(adev, new_ps);
+}
+
+
+static void ci_dpm_setup_asic(struct amdgpu_device *adev)
+{
+	ci_read_clock_registers(adev);
+	ci_enable_acpi_power_management(adev);
+	ci_init_sclk_t(adev);
+}
+
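+/* Full DPM bring-up: voltage and MC tables, firmware upload, SMC start and
+ * the power features (ULV, deep sleep, DIDT, CAC, power containment).
+ */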
+static int ci_dpm_enable(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+	int ret;
+
+	if (amdgpu_ci_is_smc_running(adev))
+		return -EINVAL;
+	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+		ci_enable_voltage_control(adev);
+		ret = ci_construct_voltage_tables(adev);
+		if (ret) {
+			DRM_ERROR("ci_construct_voltage_tables failed\n");
+			return ret;
+		}
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_initialize_mc_reg_table(adev);
+		if (ret)
+			pi->caps_dynamic_ac_timing = false;
+	}
+	if (pi->dynamic_ss)
+		ci_enable_spread_spectrum(adev, true);
+	if (pi->thermal_protection)
+		ci_enable_thermal_protection(adev, true);
+	ci_program_sstp(adev);
+	ci_enable_display_gap(adev);
+	ci_program_vc(adev);
+	ret = ci_upload_firmware(adev);
+	if (ret) {
+		DRM_ERROR("ci_upload_firmware failed\n");
+		return ret;
+	}
+	ret = ci_process_firmware_header(adev);
+	if (ret) {
+		DRM_ERROR("ci_process_firmware_header failed\n");
+		return ret;
+	}
+	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
+	if (ret) {
+		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+		return ret;
+	}
+	ret = ci_init_smc_table(adev);
+	if (ret) {
+		DRM_ERROR("ci_init_smc_table failed\n");
+		return ret;
+	}
+	ret = ci_init_arb_table_index(adev);
+	if (ret) {
+		DRM_ERROR("ci_init_arb_table_index failed\n");
+		return ret;
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_populate_initial_mc_reg_table(adev);
+		if (ret) {
+			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ci_populate_pm_base(adev);
+	if (ret) {
+		DRM_ERROR("ci_populate_pm_base failed\n");
+		return ret;
+	}
+	ci_dpm_start_smc(adev);
+	ci_enable_vr_hot_gpio_interrupt(adev);
+	ret = ci_notify_smc_display_change(adev, false);
+	if (ret) {
+		DRM_ERROR("ci_notify_smc_display_change failed\n");
+		return ret;
+	}
+	ci_enable_sclk_control(adev, true);
+	ret = ci_enable_ulv(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_ulv failed\n");
+		return ret;
+	}
+	ret = ci_enable_ds_master_switch(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_ds_master_switch failed\n");
+		return ret;
+	}
+	ret = ci_start_dpm(adev);
+	if (ret) {
+		DRM_ERROR("ci_start_dpm failed\n");
+		return ret;
+	}
+	ret = ci_enable_didt(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_didt failed\n");
+		return ret;
+	}
+	ret = ci_enable_smc_cac(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_smc_cac failed\n");
+		return ret;
+	}
+	ret = ci_enable_power_containment(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_power_containment failed\n");
+		return ret;
+	}
+
+	ret = ci_power_control_set_level(adev);
+	if (ret) {
+		DRM_ERROR("ci_power_control_set_level failed\n");
+		return ret;
+	}
+
+	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
+	if (ret) {
+		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
+		return ret;
+	}
+
+	ci_thermal_start_thermal_controller(adev);
+
+	ci_update_current_ps(adev, boot_ps);
+
+	if (adev->irq.installed &&
+	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+#if 0
+		PPSMC_Result result;
+#endif
+		ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
+						       CISLANDS_TEMP_RANGE_MAX);
+		if (ret) {
+			DRM_ERROR("ci_thermal_set_temperature_range failed\n");
+			return ret;
+		}
+		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
+			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+
+#if 0
+		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+
+		if (result != PPSMC_Result_OK)
+			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+#endif
+	}
+
+	return 0;
+}
+
+static void ci_dpm_disable(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
+	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
+		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
+
+	ci_dpm_powergate_uvd(adev, false);
+
+	if (!amdgpu_ci_is_smc_running(adev))
+		return;
+
+	ci_thermal_stop_thermal_controller(adev);
+
+	if (pi->thermal_protection)
+		ci_enable_thermal_protection(adev, false);
+	ci_enable_power_containment(adev, false);
+	ci_enable_smc_cac(adev, false);
+	ci_enable_didt(adev, false);
+	ci_enable_spread_spectrum(adev, false);
+	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+	ci_stop_dpm(adev);
+	ci_enable_ds_master_switch(adev, false);
+	ci_enable_ulv(adev, false);
+	ci_clear_vc(adev);
+	ci_reset_to_default(adev);
+	ci_dpm_stop_smc(adev);
+	ci_force_switch_to_arb_f0(adev);
+	ci_enable_thermal_based_sclk_dpm(adev, false);
+
+	ci_update_current_ps(adev, boot_ps);
+}
+
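+/* Perform the state switch: freeze DPM, upload the new levels and enable
+ * masks, update VCE and MC register settings, then unfreeze.
+ */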
+static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *new_ps = &pi->requested_rps;
+	struct amdgpu_ps *old_ps = &pi->current_rps;
+	int ret;
+
+	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
+	if (pi->pcie_performance_request)
+		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+	ret = ci_freeze_sclk_mclk_dpm(adev);
+	if (ret) {
+		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
+		return ret;
+	}
+	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
+		return ret;
+	}
+	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
+	if (ret) {
+		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
+		return ret;
+	}
+
+	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
+	if (ret) {
+		DRM_ERROR("ci_update_vce_dpm failed\n");
+		return ret;
+	}
+
+	ret = ci_update_sclk_t(adev);
+	if (ret) {
+		DRM_ERROR("ci_update_sclk_t failed\n");
+		return ret;
+	}
+	if (pi->caps_dynamic_ac_timing) {
+		ret = ci_update_and_upload_mc_reg_table(adev);
+		if (ret) {
+			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
+			return ret;
+		}
+	}
+	ret = ci_program_memory_timing_parameters(adev);
+	if (ret) {
+		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
+		return ret;
+	}
+	ret = ci_unfreeze_sclk_mclk_dpm(adev);
+	if (ret) {
+		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
+		return ret;
+	}
+	ret = ci_upload_dpm_level_enable_mask(adev);
+	if (ret) {
+		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
+		return ret;
+	}
+	if (pi->pcie_performance_request)
+		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+
+	return 0;
+}
+
+#if 0
+static void ci_dpm_reset_asic(struct amdgpu_device *adev)
+{
+	ci_set_boot_state(adev);
+}
+#endif
+
+static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+	ci_program_display_gap(adev);
+}
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+					  struct amdgpu_ps *rps,
+					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+					  u8 table_rev)
+{
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+		adev->pm.dpm.boot_ps = rps;
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		adev->pm.dpm.uvd_ps = rps;
+}
+
+static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
+				      struct amdgpu_ps *rps, int index,
+				      union pplib_clock_info *clock_info)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ps *ps = ci_get_ps(rps);
+	struct ci_pl *pl = &ps->performance_levels[index];
+
+	ps->performance_level_count = index + 1;
+
+	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
+	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+
+	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+						   pi->sys_pcie_mask,
+						   pi->vbios_boot_state.pcie_gen_bootup_value,
+						   clock_info->ci.ucPCIEGen);
+	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
+						     pi->vbios_boot_state.pcie_lane_bootup_value,
+						     le16_to_cpu(clock_info->ci.usPCIELane));
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+		pi->acpi_pcie_gen = pl->pcie_gen;
+	}
+
+	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+		pi->ulv.supported = true;
+		pi->ulv.pl = *pl;
+		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
+	}
+
+	/* patch up boot state */
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
+		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
+		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
+		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
+	}
+
+	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		pi->use_pcie_powersaving_levels = true;
+		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
+			pi->pcie_gen_powersaving.max = pl->pcie_gen;
+		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
+			pi->pcie_gen_powersaving.min = pl->pcie_gen;
+		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
+			pi->pcie_lane_powersaving.max = pl->pcie_lane;
+		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
+			pi->pcie_lane_powersaving.min = pl->pcie_lane;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		pi->use_pcie_performance_levels = true;
+		if (pi->pcie_gen_performance.max < pl->pcie_gen)
+			pi->pcie_gen_performance.max = pl->pcie_gen;
+		if (pi->pcie_gen_performance.min > pl->pcie_gen)
+			pi->pcie_gen_performance.min = pl->pcie_gen;
+		if (pi->pcie_lane_performance.max < pl->pcie_lane)
+			pi->pcie_lane_performance.max = pl->pcie_lane;
+		if (pi->pcie_lane_performance.min > pl->pcie_lane)
+			pi->pcie_lane_performance.min = pl->pcie_lane;
+		break;
+	default:
+		break;
+	}
+}
+
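+/* Parse the ATOM PowerPlay table into amdgpu power states. */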
+static int ci_parse_power_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct ci_ps *ps;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	amdgpu_add_thermal_controller(adev);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+				  state_array->ucNumEntries, GFP_KERNEL);
+	if (!adev->pm.dpm.ps)
+		return -ENOMEM;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		u8 *idx;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(adev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+		adev->pm.dpm.ps[i].ps_priv = ps;
+		ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+					      non_clock_info,
+					      non_clock_info_array->ucEntrySize);
+		k = 0;
+		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = idx[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				((u8 *)&clock_info_array->clockInfo[0] +
+				 (clock_array_index * clock_info_array->ucEntrySize));
+			ci_parse_pplib_clock_info(adev,
+						  &adev->pm.dpm.ps[i], k,
+						  clock_info);
+			k++;
+		}
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	/* fill in the vce power states */
+	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+		u32 sclk, mclk;
+		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+		clock_info = (union pplib_clock_info *)
+			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+		sclk |= clock_info->ci.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+		adev->pm.dpm.vce_states[i].sclk = sclk;
+		adev->pm.dpm.vce_states[i].mclk = mclk;
+	}
+
+	return 0;
+}
+
+static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
+				    struct ci_vbios_boot_state *boot_state)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+						    data_offset);
+		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
+		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
+		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void ci_dpm_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+		kfree(adev->pm.dpm.ps[i].ps_priv);
+	}
+	kfree(adev->pm.dpm.ps);
+	kfree(adev->pm.dpm.priv);
+	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+	amdgpu_free_extended_power_table(adev);
+}
+
+/**
+ * ci_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int ci_dpm_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		chip_name = "bonaire";
+		break;
+	case CHIP_HAWAII:
+		chip_name = "hawaii";
+		break;
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	default:
+		BUG();
+	}
+
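+	/* the CI SMC firmware is shared with the radeon driver, hence the radeon/ path */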
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+	if (err) {
+		printk(KERN_ERR
+		       "cik_smc: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
+	}
+	return err;
+}
+
+static int ci_dpm_init(struct amdgpu_device *adev)
+{
+	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+	SMU7_Discrete_DpmTable *dpm_table;
+	struct amdgpu_gpio_rec gpio;
+	u16 data_offset, size;
+	u8 frev, crev;
+	struct ci_power_info *pi;
+	int ret;
+	u32 mask;
+
+	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
+	if (pi == NULL)
+		return -ENOMEM;
+	adev->pm.dpm.priv = pi;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret)
+		pi->sys_pcie_mask = 0;
+	else
+		pi->sys_pcie_mask = mask;
+	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+
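+	/* seed the pcie gen/lane ranges with inverted bounds; they are
+	 * narrowed once the pcie tables have been parsed */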
+	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
+	pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
+	pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
+	pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
+
+	pi->pcie_lane_performance.max = 0;
+	pi->pcie_lane_performance.min = 16;
+	pi->pcie_lane_powersaving.max = 0;
+	pi->pcie_lane_powersaving.min = 16;
+
+	ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
+	if (ret) {
+		ci_dpm_fini(adev);
+		return ret;
+	}
+
+	ret = amdgpu_get_platform_caps(adev);
+	if (ret) {
+		ci_dpm_fini(adev);
+		return ret;
+	}
+
+	ret = amdgpu_parse_extended_power_table(adev);
+	if (ret) {
+		ci_dpm_fini(adev);
+		return ret;
+	}
+
+	ret = ci_parse_power_table(adev);
+	if (ret) {
+		ci_dpm_fini(adev);
+		return ret;
+	}
+
+	pi->dll_default_on = false;
+	pi->sram_end = SMC_RAM_END;
+
+	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
+	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
+
+	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
+
+	pi->sclk_dpm_key_disabled = 0;
+	pi->mclk_dpm_key_disabled = 0;
+	pi->pcie_dpm_key_disabled = 0;
+	pi->thermal_sclk_dpm_enabled = 0;
+
+	pi->caps_sclk_ds = true;
+
+	pi->mclk_strobe_mode_threshold = 40000;
+	pi->mclk_stutter_mode_threshold = 40000;
+	pi->mclk_edc_enable_threshold = 40000;
+	pi->mclk_edc_wr_enable_threshold = 40000;
+
+	ci_initialize_powertune_defaults(adev);
+
+	pi->caps_fps = false;
+
+	pi->caps_sclk_throttle_low_notification = false;
+
+	pi->caps_uvd_dpm = true;
+	pi->caps_vce_dpm = true;
+
+	ci_get_leakage_voltages(adev);
+	ci_patch_dependency_tables_with_leakage(adev);
+	ci_set_private_data_variables_based_on_pptable(adev);
+
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+		ci_dpm_fini(adev);
+		return -ENOMEM;
+	}
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+
+	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+	if (adev->asic_type == CHIP_HAWAII) {
+		pi->thermal_temp_setting.temperature_low = 94500;
+		pi->thermal_temp_setting.temperature_high = 95000;
+		pi->thermal_temp_setting.temperature_shutdown = 104000;
+	} else {
+		pi->thermal_temp_setting.temperature_low = 99500;
+		pi->thermal_temp_setting.temperature_high = 100000;
+		pi->thermal_temp_setting.temperature_shutdown = 104000;
+	}
+
+	pi->uvd_enabled = false;
+
+	dpm_table = &pi->smc_state_table;
+
+	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
+	if (gpio.valid) {
+		dpm_table->VRHotGpio = gpio.shift;
+		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+	} else {
+		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
+		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
+	}
+
+	gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
+	if (gpio.valid) {
+		dpm_table->AcDcGpio = gpio.shift;
+		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+	} else {
+		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
+		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
+	}
+
+	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
+	if (gpio.valid) {
+		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
+
+		switch (gpio.shift) {
+		case 0:
+			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
+			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
+			break;
+		case 1:
+			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
+			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
+			break;
+		case 2:
+			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
+			break;
+		case 3:
+			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
+			break;
+		case 4:
+			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
+			break;
+		default:
+			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+			break;
+		}
+		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
+	}
+
+	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
+		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+		else
+			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
+	}
+
+	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
+		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+		else
+			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
+	}
+
+	pi->vddc_phase_shed_control = true;
+
+#if defined(CONFIG_ACPI)
+	pi->pcie_performance_request =
+		amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+	pi->pcie_performance_request = false;
+#endif
+
+	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		pi->caps_sclk_ss_support = true;
+		pi->caps_mclk_ss_support = true;
+		pi->dynamic_ss = true;
+	} else {
+		pi->caps_sclk_ss_support = false;
+		pi->caps_mclk_ss_support = false;
+		pi->dynamic_ss = true;
+	}
+
+	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+		pi->thermal_protection = true;
+	else
+		pi->thermal_protection = false;
+
+	pi->caps_dynamic_ac_timing = true;
+
+	pi->uvd_power_gated = false;
+
+	/* make sure dc limits are valid */
+	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+	pi->fan_ctrl_is_in_default_mode = true;
+
+	return 0;
+}
+
+static void
+ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+					       struct seq_file *m)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct amdgpu_ps *rps = &pi->current_rps;
+	u32 sclk = ci_get_average_sclk_freq(adev);
+	u32 mclk = ci_get_average_mclk_freq(adev);
+
+	seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
+	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
+	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
+		   sclk, mclk);
+}
+
+static void ci_dpm_print_power_state(struct amdgpu_device *adev,
+				     struct amdgpu_ps *rps)
+{
+	struct ci_ps *ps = ci_get_ps(rps);
+	struct ci_pl *pl;
+	int i;
+
+	amdgpu_dpm_print_class_info(rps->class, rps->class2);
+	amdgpu_dpm_print_cap_info(rps->caps);
+	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->performance_level_count; i++) {
+		pl = &ps->performance_levels[i];
+		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
+		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
+	}
+	amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].sclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+	struct ci_power_info *pi = ci_get_pi(adev);
+	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->performance_levels[0].mclk;
+	else
+		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+/* get temperature in millidegrees */
+static int ci_dpm_get_temp(struct amdgpu_device *adev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+
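+	/* a reading with bit 9 set is clamped to 255C, otherwise use the low 9 bits */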
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp = actual_temp * 1000;
+
+	return actual_temp;
+}
+
+static int ci_set_temperature_range(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ret = ci_thermal_enable_alert(adev, false);
+	if (ret)
+		return ret;
+	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
+					       CISLANDS_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = ci_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+	return ret;
+}
+
+static int ci_dpm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	ci_dpm_set_dpm_funcs(adev);
+	ci_dpm_set_irq_funcs(adev);
+
+	return 0;
+}
+
+static int ci_dpm_late_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!amdgpu_dpm)
+		return 0;
+
+	ret = ci_set_temperature_range(adev);
+	if (ret)
+		return ret;
+
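+	/* power gate UVD until it is actually needed */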
+	ci_dpm_powergate_uvd(adev, true);
+
+	return 0;
+}
+
+static int ci_dpm_sw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
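+	/* CIK thermal interrupt sources: 230 = low to high, 231 = high to low */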
+	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+	if (ret)
+		return ret;
+
+	/* default to balanced state */
+	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+	adev->pm.default_sclk = adev->clock.default_sclk;
+	adev->pm.default_mclk = adev->clock.default_mclk;
+	adev->pm.current_sclk = adev->clock.default_sclk;
+	adev->pm.current_mclk = adev->clock.default_mclk;
+	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (amdgpu_dpm == 0)
+		return 0;
+
+	ret = ci_dpm_init_microcode(adev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+	mutex_lock(&adev->pm.mutex);
+	ret = ci_dpm_init(adev);
+	if (ret)
+		goto dpm_failed;
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+	if (amdgpu_dpm == 1)
+		amdgpu_pm_print_power_states(adev);
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		goto dpm_failed;
+	mutex_unlock(&adev->pm.mutex);
+	DRM_INFO("amdgpu: dpm initialized\n");
+
+	return 0;
+
+dpm_failed:
+	ci_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	DRM_ERROR("amdgpu: dpm initialization failed\n");
+	return ret;
+}
+
+static int ci_dpm_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+	amdgpu_pm_sysfs_fini(adev);
+	ci_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+
+	return 0;
+}
+
+static int ci_dpm_hw_init(void *handle)
+{
+	int ret;
+
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!amdgpu_dpm)
+		return 0;
+
+	mutex_lock(&adev->pm.mutex);
+	ci_dpm_setup_asic(adev);
+	ret = ci_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
+static int ci_dpm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		ci_dpm_disable(adev);
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return 0;
+}
+
+static int ci_dpm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		/* disable dpm */
+		ci_dpm_disable(adev);
+		/* reset the power state */
+		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
+	}
+	return 0;
+}
+
+static int ci_dpm_resume(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		/* asic init will reset to the boot state */
+		mutex_lock(&adev->pm.mutex);
+		ci_dpm_setup_asic(adev);
+		ret = ci_dpm_enable(adev);
+		if (ret)
+			adev->pm.dpm_enabled = false;
+		else
+			adev->pm.dpm_enabled = true;
+		mutex_unlock(&adev->pm.mutex);
+		if (adev->pm.dpm_enabled)
+			amdgpu_pm_compute_clocks(adev);
+	}
+	return 0;
+}
+
+static bool ci_dpm_is_idle(void *handle)
+{
+	/* XXX */
+	return true;
+}
+
+static int ci_dpm_wait_for_idle(void *handle)
+{
+	/* XXX */
+	return 0;
+}
+
+static void ci_dpm_print_status(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dev_info(adev->dev, "CIK DPM registers\n");
+	dev_info(adev->dev, "  BIOS_SCRATCH_4=0x%08X\n",
+		 RREG32(mmBIOS_SCRATCH_4));
+	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING=0x%08X\n",
+		 RREG32(mmMC_ARB_DRAM_TIMING));
+	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2=0x%08X\n",
+		 RREG32(mmMC_ARB_DRAM_TIMING2));
+	dev_info(adev->dev, "  MC_ARB_BURST_TIME=0x%08X\n",
+		 RREG32(mmMC_ARB_BURST_TIME));
+	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING_1=0x%08X\n",
+		 RREG32(mmMC_ARB_DRAM_TIMING_1));
+	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2_1=0x%08X\n",
+		 RREG32(mmMC_ARB_DRAM_TIMING2_1));
+	dev_info(adev->dev, "  MC_CG_CONFIG=0x%08X\n",
+		 RREG32(mmMC_CG_CONFIG));
+	dev_info(adev->dev, "  MC_ARB_CG=0x%08X\n",
+		 RREG32(mmMC_ARB_CG));
+	dev_info(adev->dev, "  DIDT_SQ_CTRL0=0x%08X\n",
+		 RREG32_DIDT(ixDIDT_SQ_CTRL0));
+	dev_info(adev->dev, "  DIDT_DB_CTRL0=0x%08X\n",
+		 RREG32_DIDT(ixDIDT_DB_CTRL0));
+	dev_info(adev->dev, "  DIDT_TD_CTRL0=0x%08X\n",
+		 RREG32_DIDT(ixDIDT_TD_CTRL0));
+	dev_info(adev->dev, "  DIDT_TCP_CTRL0=0x%08X\n",
+		 RREG32_DIDT(ixDIDT_TCP_CTRL0));
+	dev_info(adev->dev, "  CG_THERMAL_INT=0x%08X\n",
+		 RREG32_SMC(ixCG_THERMAL_INT));
+	dev_info(adev->dev, "  CG_THERMAL_CTRL=0x%08X\n",
+		 RREG32_SMC(ixCG_THERMAL_CTRL));
+	dev_info(adev->dev, "  GENERAL_PWRMGT=0x%08X\n",
+		 RREG32_SMC(ixGENERAL_PWRMGT));
+	dev_info(adev->dev, "  MC_SEQ_CNTL_3=0x%08X\n",
+		 RREG32(mmMC_SEQ_CNTL_3));
+	dev_info(adev->dev, "  LCAC_MC0_CNTL=0x%08X\n",
+		 RREG32_SMC(ixLCAC_MC0_CNTL));
+	dev_info(adev->dev, "  LCAC_MC1_CNTL=0x%08X\n",
+		 RREG32_SMC(ixLCAC_MC1_CNTL));
+	dev_info(adev->dev, "  LCAC_CPL_CNTL=0x%08X\n",
+		 RREG32_SMC(ixLCAC_CPL_CNTL));
+	dev_info(adev->dev, "  SCLK_PWRMGT_CNTL=0x%08X\n",
+		 RREG32_SMC(ixSCLK_PWRMGT_CNTL));
+	dev_info(adev->dev, "  BIF_LNCNT_RESET=0x%08X\n",
+		 RREG32(mmBIF_LNCNT_RESET));
+	dev_info(adev->dev, "  FIRMWARE_FLAGS=0x%08X\n",
+		 RREG32_SMC(ixFIRMWARE_FLAGS));
+	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL=0x%08X\n",
+		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
+	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_2=0x%08X\n",
+		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
+	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_3=0x%08X\n",
+		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
+	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_4=0x%08X\n",
+		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
+	dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
+		 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
+	dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
+		 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
+	dev_info(adev->dev, "  DLL_CNTL=0x%08X\n",
+		 RREG32(mmDLL_CNTL));
+	dev_info(adev->dev, "  MCLK_PWRMGT_CNTL=0x%08X\n",
+		 RREG32(mmMCLK_PWRMGT_CNTL));
+	dev_info(adev->dev, "  MPLL_AD_FUNC_CNTL=0x%08X\n",
+		 RREG32(mmMPLL_AD_FUNC_CNTL));
+	dev_info(adev->dev, "  MPLL_DQ_FUNC_CNTL=0x%08X\n",
+		 RREG32(mmMPLL_DQ_FUNC_CNTL));
+	dev_info(adev->dev, "  MPLL_FUNC_CNTL=0x%08X\n",
+		 RREG32(mmMPLL_FUNC_CNTL));
+	dev_info(adev->dev, "  MPLL_FUNC_CNTL_1=0x%08X\n",
+		 RREG32(mmMPLL_FUNC_CNTL_1));
+	dev_info(adev->dev, "  MPLL_FUNC_CNTL_2=0x%08X\n",
+		 RREG32(mmMPLL_FUNC_CNTL_2));
+	dev_info(adev->dev, "  MPLL_SS1=0x%08X\n",
+		 RREG32(mmMPLL_SS1));
+	dev_info(adev->dev, "  MPLL_SS2=0x%08X\n",
+		 RREG32(mmMPLL_SS2));
+	dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL=0x%08X\n",
+		 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
+	dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL2=0x%08X\n",
+		 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
+	dev_info(adev->dev, "  CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
+		 RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_0=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_1=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_2=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_3=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_4=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_5=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_6=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
+	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_7=0x%08X\n",
+		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
+	dev_info(adev->dev, "  RCU_UC_EVENTS=0x%08X\n",
+		 RREG32_SMC(ixRCU_UC_EVENTS));
+	dev_info(adev->dev, "  DPM_TABLE_475=0x%08X\n",
+		 RREG32_SMC(ixDPM_TABLE_475));
+	dev_info(adev->dev, "  MC_SEQ_RAS_TIMING_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_RAS_TIMING_LP));
+	dev_info(adev->dev, "  MC_SEQ_RAS_TIMING=0x%08X\n",
+		 RREG32(mmMC_SEQ_RAS_TIMING));
+	dev_info(adev->dev, "  MC_SEQ_CAS_TIMING_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_CAS_TIMING_LP));
+	dev_info(adev->dev, "  MC_SEQ_CAS_TIMING=0x%08X\n",
+		 RREG32(mmMC_SEQ_CAS_TIMING));
+	dev_info(adev->dev, "  MC_SEQ_DLL_STBY_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_DLL_STBY_LP));
+	dev_info(adev->dev, "  MC_SEQ_DLL_STBY=0x%08X\n",
+		 RREG32(mmMC_SEQ_DLL_STBY));
+	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
+	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0=0x%08X\n",
+		 RREG32(mmMC_SEQ_G5PDX_CMD0));
+	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
+	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1=0x%08X\n",
+		 RREG32(mmMC_SEQ_G5PDX_CMD1));
+	dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
+	dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL=0x%08X\n",
+		 RREG32(mmMC_SEQ_G5PDX_CTRL));
+	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
+	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_DVS_CMD));
+	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
+	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_DVS_CTL));
+	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_MISC_TIMING_LP));
+	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING=0x%08X\n",
+		 RREG32(mmMC_SEQ_MISC_TIMING));
+	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_MISC_TIMING2_LP));
+	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2=0x%08X\n",
+		 RREG32(mmMC_SEQ_MISC_TIMING2));
+	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
+	dev_info(adev->dev, "  MC_PMG_CMD_EMRS=0x%08X\n",
+		 RREG32(mmMC_PMG_CMD_EMRS));
+	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
+	dev_info(adev->dev, "  MC_PMG_CMD_MRS=0x%08X\n",
+		 RREG32(mmMC_PMG_CMD_MRS));
+	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
+	dev_info(adev->dev, "  MC_PMG_CMD_MRS1=0x%08X\n",
+		 RREG32(mmMC_PMG_CMD_MRS1));
+	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_WR_CTL_D0_LP));
+	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0=0x%08X\n",
+		 RREG32(mmMC_SEQ_WR_CTL_D0));
+	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_WR_CTL_D1_LP));
+	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1=0x%08X\n",
+		 RREG32(mmMC_SEQ_WR_CTL_D1));
+	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_RD_CTL_D0_LP));
+	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0=0x%08X\n",
+		 RREG32(mmMC_SEQ_RD_CTL_D0));
+	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_RD_CTL_D1_LP));
+	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1=0x%08X\n",
+		 RREG32(mmMC_SEQ_RD_CTL_D1));
+	dev_info(adev->dev, "  MC_SEQ_PMG_TIMING_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_TIMING_LP));
+	dev_info(adev->dev, "  MC_SEQ_PMG_TIMING=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_TIMING));
+	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
+	dev_info(adev->dev, "  MC_PMG_CMD_MRS2=0x%08X\n",
+		 RREG32(mmMC_PMG_CMD_MRS2));
+	dev_info(adev->dev, "  MC_SEQ_WR_CTL_2_LP=0x%08X\n",
+		 RREG32(mmMC_SEQ_WR_CTL_2_LP));
+	dev_info(adev->dev, "  MC_SEQ_WR_CTL_2=0x%08X\n",
+		 RREG32(mmMC_SEQ_WR_CTL_2));
+	dev_info(adev->dev, "  PCIE_LC_SPEED_CNTL=0x%08X\n",
+		 RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
+	dev_info(adev->dev, "  PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
+		 RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
+	dev_info(adev->dev, "  SMC_IND_INDEX_0=0x%08X\n",
+		 RREG32(mmSMC_IND_INDEX_0));
+	dev_info(adev->dev, "  SMC_IND_DATA_0=0x%08X\n",
+		 RREG32(mmSMC_IND_DATA_0));
+	dev_info(adev->dev, "  SMC_IND_ACCESS_CNTL=0x%08X\n",
+		 RREG32(mmSMC_IND_ACCESS_CNTL));
+	dev_info(adev->dev, "  SMC_RESP_0=0x%08X\n",
+		 RREG32(mmSMC_RESP_0));
+	dev_info(adev->dev, "  SMC_MESSAGE_0=0x%08X\n",
+		 RREG32(mmSMC_MESSAGE_0));
+	dev_info(adev->dev, "  SMC_SYSCON_RESET_CNTL=0x%08X\n",
+		 RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
+	dev_info(adev->dev, "  SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
+		 RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
+	dev_info(adev->dev, "  SMC_SYSCON_MISC_CNTL=0x%08X\n",
+		 RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
+	dev_info(adev->dev, "  SMC_PC_C=0x%08X\n",
+		 RREG32_SMC(ixSMC_PC_C));
+}
+
+static int ci_dpm_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
+				      struct amdgpu_irq_src *source,
+				      unsigned type,
+				      enum amdgpu_interrupt_state state)
+{
+	u32 cg_thermal_int;
+
+	switch (type) {
+	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
+				    struct amdgpu_irq_src *source,
+				    struct amdgpu_iv_entry *entry)
+{
+	bool queue_thermal = false;
+
+	if (entry == NULL)
+		return -EINVAL;
+
+	switch (entry->src_id) {
+	case 230: /* thermal low to high */
+		DRM_DEBUG("IH: thermal low to high\n");
+		adev->pm.dpm.thermal.high_to_low = false;
+		queue_thermal = true;
+		break;
+	case 231: /* thermal high to low */
+		DRM_DEBUG("IH: thermal high to low\n");
+		adev->pm.dpm.thermal.high_to_low = true;
+		queue_thermal = true;
+		break;
+	default:
+		break;
+	}
+
+	if (queue_thermal)
+		schedule_work(&adev->pm.dpm.thermal.work);
+
+	return 0;
+}
+
+static int ci_dpm_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int ci_dpm_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs ci_dpm_ip_funcs = {
+	.early_init = ci_dpm_early_init,
+	.late_init = ci_dpm_late_init,
+	.sw_init = ci_dpm_sw_init,
+	.sw_fini = ci_dpm_sw_fini,
+	.hw_init = ci_dpm_hw_init,
+	.hw_fini = ci_dpm_hw_fini,
+	.suspend = ci_dpm_suspend,
+	.resume = ci_dpm_resume,
+	.is_idle = ci_dpm_is_idle,
+	.wait_for_idle = ci_dpm_wait_for_idle,
+	.soft_reset = ci_dpm_soft_reset,
+	.print_status = ci_dpm_print_status,
+	.set_clockgating_state = ci_dpm_set_clockgating_state,
+	.set_powergating_state = ci_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
+	.get_temperature = &ci_dpm_get_temp,
+	.pre_set_power_state = &ci_dpm_pre_set_power_state,
+	.set_power_state = &ci_dpm_set_power_state,
+	.post_set_power_state = &ci_dpm_post_set_power_state,
+	.display_configuration_changed = &ci_dpm_display_configuration_changed,
+	.get_sclk = &ci_dpm_get_sclk,
+	.get_mclk = &ci_dpm_get_mclk,
+	.print_power_state = &ci_dpm_print_power_state,
+	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+	.force_performance_level = &ci_dpm_force_performance_level,
+	.vblank_too_short = &ci_dpm_vblank_too_short,
+	.powergate_uvd = &ci_dpm_powergate_uvd,
+	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
+	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
+	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
+	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+};
+
+static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+	if (adev->pm.funcs == NULL)
+		adev->pm.funcs = &ci_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
+	.set = ci_dpm_set_interrupt_state,
+	.process = ci_dpm_process_interrupt,
+};
+
+static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
+}

+ 348 - 0
drivers/gpu/drm/amd/amdgpu/ci_dpm.h

@@ -0,0 +1,348 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_DPM_H__
+#define __CI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "ppsmc.h"
+
+#define SMU__NUM_SCLK_DPM_STATE  8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#include "smu7_discrete.h"
+
+#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+
+#define CISLANDS_UNUSED_GPIO_PIN 0x7F
+
+struct ci_pl {
+	u32 mclk;
+	u32 sclk;
+	enum amdgpu_pcie_gen pcie_gen;
+	u16 pcie_lane;
+};
+
+struct ci_ps {
+	u16 performance_level_count;
+	bool dc_compatible;
+	u32 sclk_t;
+	struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct ci_dpm_level {
+	bool enabled;
+	u32 value;
+	u32 param1;
+};
+
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+
+struct ci_single_dpm_table {
+	u32 count;
+	struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct ci_dpm_table {
+	struct ci_single_dpm_table sclk_table;
+	struct ci_single_dpm_table mclk_table;
+	struct ci_single_dpm_table pcie_speed_table;
+	struct ci_single_dpm_table vddc_table;
+	struct ci_single_dpm_table vddci_table;
+	struct ci_single_dpm_table mvdd_table;
+};
+
+struct ci_mc_reg_entry {
+	u32 mclk_max;
+	u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+	u8 last;
+	u8 num_entries;
+	u16 valid_flag;
+	struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+	SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_ulv_parm {
+	bool supported;
+	u32 cg_ulv_parameter;
+	u32 volt_change_delay;
+	struct ci_pl pl;
+};
+
+#define CISLANDS_MAX_LEAKAGE_COUNT  8
+
+struct ci_leakage_voltage {
+	u16 count;
+	u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
+	u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct ci_dpm_level_enable_mask {
+	u32 uvd_dpm_enable_mask;
+	u32 vce_dpm_enable_mask;
+	u32 acp_dpm_enable_mask;
+	u32 samu_dpm_enable_mask;
+	u32 sclk_dpm_enable_mask;
+	u32 mclk_dpm_enable_mask;
+	u32 pcie_dpm_enable_mask;
+};
+
+struct ci_vbios_boot_state {
+	u16 mvdd_bootup_value;
+	u16 vddc_bootup_value;
+	u16 vddci_bootup_value;
+	u32 sclk_bootup_value;
+	u32 mclk_bootup_value;
+	u16 pcie_gen_bootup_value;
+	u16 pcie_lane_bootup_value;
+};
+
+struct ci_clock_registers {
+	u32 cg_spll_func_cntl;
+	u32 cg_spll_func_cntl_2;
+	u32 cg_spll_func_cntl_3;
+	u32 cg_spll_func_cntl_4;
+	u32 cg_spll_spread_spectrum;
+	u32 cg_spll_spread_spectrum_2;
+	u32 dll_cntl;
+	u32 mclk_pwrmgt_cntl;
+	u32 mpll_ad_func_cntl;
+	u32 mpll_dq_func_cntl;
+	u32 mpll_func_cntl;
+	u32 mpll_func_cntl_1;
+	u32 mpll_func_cntl_2;
+	u32 mpll_ss1;
+	u32 mpll_ss2;
+};
+
+struct ci_thermal_temperature_setting {
+	s32 temperature_low;
+	s32 temperature_high;
+	s32 temperature_shutdown;
+};
+
+struct ci_pcie_perf_range {
+	u16 max;
+	u16 min;
+};
+
+enum ci_pt_config_reg_type {
+	CISLANDS_CONFIGREG_MMR = 0,
+	CISLANDS_CONFIGREG_SMC_IND,
+	CISLANDS_CONFIGREG_DIDT_IND,
+	CISLANDS_CONFIGREG_CACHE,
+	CISLANDS_CONFIGREG_MAX
+};
+
+#define POWERCONTAINMENT_FEATURE_BAPM            0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
+
+struct ci_pt_config_reg {
+	u32 offset;
+	u32 mask;
+	u32 shift;
+	u32 value;
+	enum ci_pt_config_reg_type type;
+};
+
+struct ci_pt_defaults {
+	u8 svi_load_line_en;
+	u8 svi_load_line_vddc;
+	u8 tdc_vddc_throttle_release_limit_perc;
+	u8 tdc_mawt;
+	u8 tdc_waterfall_ctl;
+	u8 dte_ambient_temp_base;
+	u32 display_cac;
+	u32 bapm_temp_gradient;
+	u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+	u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK     0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK     0x00000002
+#define DPMTABLE_UPDATE_SCLK        0x00000004
+#define DPMTABLE_UPDATE_MCLK        0x00000008
+
+struct ci_power_info {
+	struct ci_dpm_table dpm_table;
+	u32 voltage_control;
+	u32 mvdd_control;
+	u32 vddci_control;
+	u32 active_auto_throttle_sources;
+	struct ci_clock_registers clock_registers;
+	u16 acpi_vddc;
+	u16 acpi_vddci;
+	enum amdgpu_pcie_gen force_pcie_gen;
+	enum amdgpu_pcie_gen acpi_pcie_gen;
+	struct ci_leakage_voltage vddc_leakage;
+	struct ci_leakage_voltage vddci_leakage;
+	u16 max_vddc_in_pp_table;
+	u16 min_vddc_in_pp_table;
+	u16 max_vddci_in_pp_table;
+	u16 min_vddci_in_pp_table;
+	u32 mclk_strobe_mode_threshold;
+	u32 mclk_stutter_mode_threshold;
+	u32 mclk_edc_enable_threshold;
+	u32 mclk_edc_wr_enable_threshold;
+	struct ci_vbios_boot_state vbios_boot_state;
+	/* smc offsets */
+	u32 sram_end;
+	u32 dpm_table_start;
+	u32 soft_regs_start;
+	u32 mc_reg_table_start;
+	u32 fan_table_start;
+	u32 arb_table_start;
+	/* smc tables */
+	SMU7_Discrete_DpmTable smc_state_table;
+	SMU7_Discrete_MCRegisters smc_mc_reg_table;
+	SMU7_Discrete_PmFuses smc_powertune_table;
+	/* other stuff */
+	struct ci_mc_reg_table mc_reg_table;
+	struct atom_voltage_table vddc_voltage_table;
+	struct atom_voltage_table vddci_voltage_table;
+	struct atom_voltage_table mvdd_voltage_table;
+	struct ci_ulv_parm ulv;
+	u32 power_containment_features;
+	const struct ci_pt_defaults *powertune_defaults;
+	u32 dte_tj_offset;
+	bool vddc_phase_shed_control;
+	struct ci_thermal_temperature_setting thermal_temp_setting;
+	struct ci_dpm_level_enable_mask dpm_level_enable_mask;
+	u32 need_update_smu7_dpm_table;
+	u32 sclk_dpm_key_disabled;
+	u32 mclk_dpm_key_disabled;
+	u32 pcie_dpm_key_disabled;
+	u32 thermal_sclk_dpm_enabled;
+	struct ci_pcie_perf_range pcie_gen_performance;
+	struct ci_pcie_perf_range pcie_lane_performance;
+	struct ci_pcie_perf_range pcie_gen_powersaving;
+	struct ci_pcie_perf_range pcie_lane_powersaving;
+	u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+	u32 mclk_activity_target;
+	u32 low_sclk_interrupt_t;
+	u32 last_mclk_dpm_enable_mask;
+	u32 sys_pcie_mask;
+	/* caps */
+	bool caps_power_containment;
+	bool caps_cac;
+	bool caps_sq_ramping;
+	bool caps_db_ramping;
+	bool caps_td_ramping;
+	bool caps_tcp_ramping;
+	bool caps_fps;
+	bool caps_sclk_ds;
+	bool caps_sclk_ss_support;
+	bool caps_mclk_ss_support;
+	bool caps_uvd_dpm;
+	bool caps_vce_dpm;
+	bool caps_samu_dpm;
+	bool caps_acp_dpm;
+	bool caps_automatic_dc_transition;
+	bool caps_sclk_throttle_low_notification;
+	bool caps_dynamic_ac_timing;
+	bool caps_od_fuzzy_fan_control_support;
+	/* flags */
+	bool thermal_protection;
+	bool pcie_performance_request;
+	bool dynamic_ss;
+	bool dll_default_on;
+	bool cac_enabled;
+	bool uvd_enabled;
+	bool battery_state;
+	bool pspp_notify_required;
+	bool enable_bapm_feature;
+	bool enable_tdc_limit_feature;
+	bool enable_pkg_pwr_tracking_feature;
+	bool use_pcie_performance_levels;
+	bool use_pcie_powersaving_levels;
+	bool uvd_power_gated;
+	/* driver states */
+	struct amdgpu_ps current_rps;
+	struct ci_ps current_ps;
+	struct amdgpu_ps requested_rps;
+	struct ci_ps requested_ps;
+	/* fan control */
+	bool fan_ctrl_is_in_default_mode;
+	bool fan_is_controlled_by_smc;
+	u32 t_min;
+	u32 fan_ctrl_default_mode;
+};
+
+#define CISLANDS_VOLTAGE_CONTROL_NONE                   0x0
+#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO                0x1
+#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2               0x2
+
+#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT             256
+
+#define CISLANDS_VRC_DFLT0                              0x3FFFC000
+#define CISLANDS_VRC_DFLT1                              0x000400
+#define CISLANDS_VRC_DFLT2                              0xC00080
+#define CISLANDS_VRC_DFLT3                              0xC00200
+#define CISLANDS_VRC_DFLT4                              0xC01680
+#define CISLANDS_VRC_DFLT5                              0xC00033
+#define CISLANDS_VRC_DFLT6                              0xC00033
+#define CISLANDS_VRC_DFLT7                              0x3FFFC000
+
+#define CISLANDS_CGULVPARAMETER_DFLT                    0x00040035
+#define CISLAND_TARGETACTIVITY_DFLT                     30
+#define CISLAND_MCLK_TARGETACTIVITY_DFLT                10
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY   0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER    1
+#define PCIE_PERF_REQ_PECI_GEN1         2
+#define PCIE_PERF_REQ_PECI_GEN2         3
+#define PCIE_PERF_REQ_PECI_GEN3         4
+
+#define CISLANDS_SSTU_DFLT                               0
+#define CISLANDS_SST_DFLT                                0x00C8
+
+/* XXX are these ok? */
+#define CISLANDS_TEMP_RANGE_MIN (90 * 1000)
+#define CISLANDS_TEMP_RANGE_MAX (120 * 1000)
+
+int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_ci_start_smc(struct amdgpu_device *adev);
+void amdgpu_ci_reset_smc(struct amdgpu_device *adev);
+int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev);
+void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev);
+bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
+			   u32 smc_address, u32 *value, u32 limit);
+int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
+			    u32 smc_address, u32 value, u32 limit);
+
+#endif

+ 279 - 0
drivers/gpu/drm/amd/amdgpu/ci_smc.c

@@ -0,0 +1,279 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "cikd.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "ci_dpm.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+static int ci_set_smc_sram_address(struct amdgpu_device *adev,
+				   u32 smc_address, u32 limit)
+{
+	if (smc_address & 3)
+		return -EINVAL;
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(mmSMC_IND_INDEX_0, smc_address);
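+	/* single dword access, so make sure auto-increment is disabled */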
+	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+
+	return 0;
+}
+
+int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
+			 u32 smc_start_address,
+			 const u8 *src, u32 byte_count, u32 limit)
+{
+	unsigned long flags;
+	u32 data, original_data;
+	u32 addr;
+	u32 extra_shift;
+	int ret = 0;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		ret = ci_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	/* RMW for the final bytes */
+	if (byte_count > 0) {
+		data = 0;
+
+		ret = ci_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		original_data = RREG32(mmSMC_IND_DATA_0);
+
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+
+		data |= (original_data & ~((~0UL) << extra_shift));
+
+		ret = ci_set_smc_sram_address(adev, addr, limit);
+		if (ret)
+			goto done;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+	}
+
+done:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+void amdgpu_ci_start_smc(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+
+	tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_ci_reset_smc(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+
+	tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev)
+{
+	static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
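+	/* program a 4-byte jump at SMC address 0 so the SMC jumps to the
+	 * loaded firmware when it is started */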
+	return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+
+	tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
+
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+
+	tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
+
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev)
+{
+	u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	u32 pc_c = RREG32_SMC(ixSMC_PC_C);
+
+	if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c))
+		return true;
+
+	return false;
+}
+
+PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+	u32 tmp;
+	int i;
+
+	if (!amdgpu_ci_is_smc_running(adev))
+		return PPSMC_Result_Failed;
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
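+	/* poll until the SMC posts a non-zero response or we time out */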
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(mmSMC_RESP_0);
+		if (tmp != 0)
+			break;
+		udelay(1);
+	}
+	tmp = RREG32(mmSMC_RESP_0);
+
+	return (PPSMC_Result)tmp;
+}
+
+PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	u32 tmp;
+	int i;
+
+	if (!amdgpu_ci_is_smc_running(adev))
+		return PPSMC_Result_OK;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+		if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
+			break;
+		udelay(1);
+	}
+
+	return PPSMC_Result_OK;
+}
+
+int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	unsigned long flags;
+	u32 ucode_start_address;
+	u32 ucode_size;
+	const u8 *src;
+	u32 data;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	src = (const u8 *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	if (ucode_size & 3)
+		return -EINVAL;
+
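+	/* stream the ucode into SMC SRAM using auto-incrementing indirect writes */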
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+	WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
+		~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+	while (ucode_size >= 4) {
+		/* SMC address space is BE */
+		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+		WREG32(mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		ucode_size -= 4;
+	}
+	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return 0;
+}
+
+int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
+			   u32 smc_address, u32 *value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	ret = ci_set_smc_sram_address(adev, smc_address, limit);
+	if (ret == 0)
+		*value = RREG32(mmSMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}
+
+int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
+			    u32 smc_address, u32 value, u32 limit)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	ret = ci_set_smc_sram_address(adev, smc_address, limit);
+	if (ret == 0)
+		WREG32(mmSMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return ret;
+}

+ 2513 - 0
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -0,0 +1,2513 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "cikd.h"
+#include "atom.h"
+
+#include "cik.h"
+#include "gmc_v7_0.h"
+#include "cik_ih.h"
+#include "dce_v8_0.h"
+#include "gfx_v7_0.h"
+#include "cik_sdma.h"
+#include "uvd_v4_2.h"
+#include "vce_v2_0.h"
+#include "cik_dpm.h"
+
+#include "uvd/uvd_4_2_d.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+/*
+ * Indirect register accessors
+ */
+static u32 cik_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(mmPCIE_INDEX, reg);
+	(void)RREG32(mmPCIE_INDEX);
+	r = RREG32(mmPCIE_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	return r;
+}
+
+static void cik_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+	WREG32(mmPCIE_INDEX, reg);
+	(void)RREG32(mmPCIE_INDEX);
+	WREG32(mmPCIE_DATA, v);
+	(void)RREG32(mmPCIE_DATA);
+	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 cik_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmSMC_IND_INDEX_0, (reg));
+	r = RREG32(mmSMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return r;
+}
+
+static void cik_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmSMC_IND_INDEX_0, (reg));
+	WREG32(mmSMC_IND_DATA_0, (v));
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+	r = RREG32(mmUVD_CTX_DATA);
+	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+	return r;
+}
+
+static void cik_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
+	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
+	WREG32(mmUVD_CTX_DATA, (v));
+	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
+}
+
+static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->didt_idx_lock, flags);
+	WREG32(mmDIDT_IND_INDEX, (reg));
+	r = RREG32(mmDIDT_IND_DATA);
+	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+	return r;
+}
+
+static void cik_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->didt_idx_lock, flags);
+	WREG32(mmDIDT_IND_INDEX, (reg));
+	WREG32(mmDIDT_IND_DATA, (v));
+	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
+}
+
+static const u32 bonaire_golden_spm_registers[] =
+{
+	0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 bonaire_golden_common_registers[] =
+{
+	0x31dc, 0xffffffff, 0x00000800,
+	0x31dd, 0xffffffff, 0x00000800,
+	0x31e6, 0xffffffff, 0x00007fbf,
+	0x31e7, 0xffffffff, 0x00007faf
+};
+
+static const u32 bonaire_golden_registers[] =
+{
+	0xcd5, 0x00000333, 0x00000333,
+	0xcd4, 0x000c0fc0, 0x00040200,
+	0x2684, 0x00010000, 0x00058208,
+	0xf000, 0xffff1fff, 0x00140000,
+	0xf080, 0xfdfc0fff, 0x00000100,
+	0xf08d, 0x40000000, 0x40000200,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x31e, 0x00000080, 0x00000000,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0xf0311fff, 0x80300000,
+	0x263e, 0x73773777, 0x12010001,
+	0xd43, 0x00810000, 0x408af000,
+	0x1c0c, 0x31000111, 0x00000011,
+	0xbd2, 0x73773777, 0x12010001,
+	0x883, 0x00007fb6, 0x0021a1b1,
+	0x884, 0x00007fb6, 0x002021b1,
+	0x860, 0x00007fb6, 0x00002191,
+	0x886, 0x00007fb6, 0x002121b1,
+	0x887, 0x00007fb6, 0x002021b1,
+	0x877, 0x00007fb6, 0x00002191,
+	0x878, 0x00007fb6, 0x00002191,
+	0xd8a, 0x0000003f, 0x0000000a,
+	0xd8b, 0x0000003f, 0x0000000a,
+	0xab9, 0x00073ffe, 0x000022a2,
+	0x903, 0x000007ff, 0x00000000,
+	0x2285, 0xf000003f, 0x00000007,
+	0x22fc, 0x00002001, 0x00000001,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0xc281, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x06000000,
+	0x136, 0x00000fff, 0x00000100,
+	0xf9e, 0x00000001, 0x00000002,
+	0x2440, 0x03000000, 0x0362c688,
+	0x2300, 0x000000ff, 0x00000001,
+	0x390, 0x00001fff, 0x00001fff,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b05, 0x000003ff, 0x000000f3,
+	0x2b03, 0xffffffff, 0x00001032
+};
+
+static const u32 bonaire_mgcg_cgcg_init[] =
+{
+	0x3108, 0xffffffff, 0xfffffffc,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf0a8, 0xffffffff, 0x00000100,
+	0xf082, 0xffffffff, 0x00000100,
+	0xf0b0, 0xffffffff, 0xc0000100,
+	0xf0b2, 0xffffffff, 0xc0000100,
+	0xf0b1, 0xffffffff, 0xc0000100,
+	0x1579, 0xffffffff, 0x00600100,
+	0xf0a0, 0xffffffff, 0x00000100,
+	0xf085, 0xffffffff, 0x06000100,
+	0xf088, 0xffffffff, 0x00000100,
+	0xf086, 0xffffffff, 0x06000100,
+	0xf081, 0xffffffff, 0x00000100,
+	0xf0b8, 0xffffffff, 0x00000100,
+	0xf089, 0xffffffff, 0x00000100,
+	0xf080, 0xffffffff, 0x00000100,
+	0xf08c, 0xffffffff, 0x00000100,
+	0xf08d, 0xffffffff, 0x00000100,
+	0xf094, 0xffffffff, 0x00000100,
+	0xf095, 0xffffffff, 0x00000100,
+	0xf096, 0xffffffff, 0x00000100,
+	0xf097, 0xffffffff, 0x00000100,
+	0xf098, 0xffffffff, 0x00000100,
+	0xf09f, 0xffffffff, 0x00000100,
+	0xf09e, 0xffffffff, 0x00000100,
+	0xf084, 0xffffffff, 0x06000100,
+	0xf0a4, 0xffffffff, 0x00000100,
+	0xf09d, 0xffffffff, 0x00000100,
+	0xf0ad, 0xffffffff, 0x00000100,
+	0xf0ac, 0xffffffff, 0x00000100,
+	0xf09c, 0xffffffff, 0x00000100,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf008, 0xffffffff, 0x00010000,
+	0xf009, 0xffffffff, 0x00030002,
+	0xf00a, 0xffffffff, 0x00040007,
+	0xf00b, 0xffffffff, 0x00060005,
+	0xf00c, 0xffffffff, 0x00090008,
+	0xf00d, 0xffffffff, 0x00010000,
+	0xf00e, 0xffffffff, 0x00030002,
+	0xf00f, 0xffffffff, 0x00040007,
+	0xf010, 0xffffffff, 0x00060005,
+	0xf011, 0xffffffff, 0x00090008,
+	0xf012, 0xffffffff, 0x00010000,
+	0xf013, 0xffffffff, 0x00030002,
+	0xf014, 0xffffffff, 0x00040007,
+	0xf015, 0xffffffff, 0x00060005,
+	0xf016, 0xffffffff, 0x00090008,
+	0xf017, 0xffffffff, 0x00010000,
+	0xf018, 0xffffffff, 0x00030002,
+	0xf019, 0xffffffff, 0x00040007,
+	0xf01a, 0xffffffff, 0x00060005,
+	0xf01b, 0xffffffff, 0x00090008,
+	0xf01c, 0xffffffff, 0x00010000,
+	0xf01d, 0xffffffff, 0x00030002,
+	0xf01e, 0xffffffff, 0x00040007,
+	0xf01f, 0xffffffff, 0x00060005,
+	0xf020, 0xffffffff, 0x00090008,
+	0xf021, 0xffffffff, 0x00010000,
+	0xf022, 0xffffffff, 0x00030002,
+	0xf023, 0xffffffff, 0x00040007,
+	0xf024, 0xffffffff, 0x00060005,
+	0xf025, 0xffffffff, 0x00090008,
+	0xf026, 0xffffffff, 0x00010000,
+	0xf027, 0xffffffff, 0x00030002,
+	0xf028, 0xffffffff, 0x00040007,
+	0xf029, 0xffffffff, 0x00060005,
+	0xf02a, 0xffffffff, 0x00090008,
+	0xf000, 0xffffffff, 0x96e00200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x3109, 0xffffffff, 0x0020003f,
+	0xe, 0xffffffff, 0x0140001c,
+	0xf, 0x000f0000, 0x000f0000,
+	0x88, 0xffffffff, 0xc060000c,
+	0x89, 0xc0000fff, 0x00000100,
+	0x3e4, 0xffffffff, 0x00000100,
+	0x3e6, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x1579, 0xff000fff, 0x00000100,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3403, 0xff000ff0, 0x00000100,
+	0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 spectre_golden_spm_registers[] =
+{
+	0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 spectre_golden_common_registers[] =
+{
+	0x31dc, 0xffffffff, 0x00000800,
+	0x31dd, 0xffffffff, 0x00000800,
+	0x31e6, 0xffffffff, 0x00007fbf,
+	0x31e7, 0xffffffff, 0x00007faf
+};
+
+static const u32 spectre_golden_registers[] =
+{
+	0xf000, 0xffff1fff, 0x96940200,
+	0xf003, 0xffff0001, 0xff000000,
+	0xf080, 0xfffc0fff, 0x00000100,
+	0x1bb6, 0x00010101, 0x00010000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0xfffffffc, 0x00020200,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0xf0311fff, 0x80300000,
+	0x263e, 0x73773777, 0x12010001,
+	0x26df, 0x00ff0000, 0x00fc0000,
+	0xbd2, 0x73773777, 0x12010001,
+	0x2285, 0xf000003f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0xa0d4, 0x3f3f3fff, 0x00000082,
+	0xa0d5, 0x0000003f, 0x00000000,
+	0xf9e, 0x00000001, 0x00000002,
+	0x244f, 0xffff03df, 0x00000004,
+	0x31da, 0x00000008, 0x00000008,
+	0x2300, 0x000008ff, 0x00000800,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b03, 0xffffffff, 0x54763210,
+	0x853e, 0x01ff01ff, 0x00000002,
+	0x8526, 0x007ff800, 0x00200000,
+	0x8057, 0xffffffff, 0x00000f40,
+	0xc24d, 0xffffffff, 0x00000001
+};
+
+static const u32 spectre_mgcg_cgcg_init[] =
+{
+	0x3108, 0xffffffff, 0xfffffffc,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf0a8, 0xffffffff, 0x00000100,
+	0xf082, 0xffffffff, 0x00000100,
+	0xf0b0, 0xffffffff, 0x00000100,
+	0xf0b2, 0xffffffff, 0x00000100,
+	0xf0b1, 0xffffffff, 0x00000100,
+	0x1579, 0xffffffff, 0x00600100,
+	0xf0a0, 0xffffffff, 0x00000100,
+	0xf085, 0xffffffff, 0x06000100,
+	0xf088, 0xffffffff, 0x00000100,
+	0xf086, 0xffffffff, 0x06000100,
+	0xf081, 0xffffffff, 0x00000100,
+	0xf0b8, 0xffffffff, 0x00000100,
+	0xf089, 0xffffffff, 0x00000100,
+	0xf080, 0xffffffff, 0x00000100,
+	0xf08c, 0xffffffff, 0x00000100,
+	0xf08d, 0xffffffff, 0x00000100,
+	0xf094, 0xffffffff, 0x00000100,
+	0xf095, 0xffffffff, 0x00000100,
+	0xf096, 0xffffffff, 0x00000100,
+	0xf097, 0xffffffff, 0x00000100,
+	0xf098, 0xffffffff, 0x00000100,
+	0xf09f, 0xffffffff, 0x00000100,
+	0xf09e, 0xffffffff, 0x00000100,
+	0xf084, 0xffffffff, 0x06000100,
+	0xf0a4, 0xffffffff, 0x00000100,
+	0xf09d, 0xffffffff, 0x00000100,
+	0xf0ad, 0xffffffff, 0x00000100,
+	0xf0ac, 0xffffffff, 0x00000100,
+	0xf09c, 0xffffffff, 0x00000100,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf008, 0xffffffff, 0x00010000,
+	0xf009, 0xffffffff, 0x00030002,
+	0xf00a, 0xffffffff, 0x00040007,
+	0xf00b, 0xffffffff, 0x00060005,
+	0xf00c, 0xffffffff, 0x00090008,
+	0xf00d, 0xffffffff, 0x00010000,
+	0xf00e, 0xffffffff, 0x00030002,
+	0xf00f, 0xffffffff, 0x00040007,
+	0xf010, 0xffffffff, 0x00060005,
+	0xf011, 0xffffffff, 0x00090008,
+	0xf012, 0xffffffff, 0x00010000,
+	0xf013, 0xffffffff, 0x00030002,
+	0xf014, 0xffffffff, 0x00040007,
+	0xf015, 0xffffffff, 0x00060005,
+	0xf016, 0xffffffff, 0x00090008,
+	0xf017, 0xffffffff, 0x00010000,
+	0xf018, 0xffffffff, 0x00030002,
+	0xf019, 0xffffffff, 0x00040007,
+	0xf01a, 0xffffffff, 0x00060005,
+	0xf01b, 0xffffffff, 0x00090008,
+	0xf01c, 0xffffffff, 0x00010000,
+	0xf01d, 0xffffffff, 0x00030002,
+	0xf01e, 0xffffffff, 0x00040007,
+	0xf01f, 0xffffffff, 0x00060005,
+	0xf020, 0xffffffff, 0x00090008,
+	0xf021, 0xffffffff, 0x00010000,
+	0xf022, 0xffffffff, 0x00030002,
+	0xf023, 0xffffffff, 0x00040007,
+	0xf024, 0xffffffff, 0x00060005,
+	0xf025, 0xffffffff, 0x00090008,
+	0xf026, 0xffffffff, 0x00010000,
+	0xf027, 0xffffffff, 0x00030002,
+	0xf028, 0xffffffff, 0x00040007,
+	0xf029, 0xffffffff, 0x00060005,
+	0xf02a, 0xffffffff, 0x00090008,
+	0xf02b, 0xffffffff, 0x00010000,
+	0xf02c, 0xffffffff, 0x00030002,
+	0xf02d, 0xffffffff, 0x00040007,
+	0xf02e, 0xffffffff, 0x00060005,
+	0xf02f, 0xffffffff, 0x00090008,
+	0xf000, 0xffffffff, 0x96e00200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x3109, 0xffffffff, 0x0020003f,
+	0xe, 0xffffffff, 0x0140001c,
+	0xf, 0x000f0000, 0x000f0000,
+	0x88, 0xffffffff, 0xc060000c,
+	0x89, 0xc0000fff, 0x00000100,
+	0x3e4, 0xffffffff, 0x00000100,
+	0x3e6, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x1579, 0xff000fff, 0x00000100,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3403, 0xff000ff0, 0x00000100,
+	0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 kalindi_golden_spm_registers[] =
+{
+	0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 kalindi_golden_common_registers[] =
+{
+	0x31dc, 0xffffffff, 0x00000800,
+	0x31dd, 0xffffffff, 0x00000800,
+	0x31e6, 0xffffffff, 0x00007fbf,
+	0x31e7, 0xffffffff, 0x00007faf
+};
+
+static const u32 kalindi_golden_registers[] =
+{
+	0xf000, 0xffffdfff, 0x6e944040,
+	0x1579, 0xff607fff, 0xfc000100,
+	0xf088, 0xff000fff, 0x00000100,
+	0xf089, 0xff000fff, 0x00000100,
+	0xf080, 0xfffc0fff, 0x00000100,
+	0x1bb6, 0x00010101, 0x00010000,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0xf0311fff, 0x80300000,
+	0x263e, 0x73773777, 0x12010001,
+	0x263f, 0xffffffff, 0x00000010,
+	0x26df, 0x00ff0000, 0x00fc0000,
+	0x200c, 0x00001f0f, 0x0000100a,
+	0xbd2, 0x73773777, 0x12010001,
+	0x902, 0x000fffff, 0x000c007f,
+	0x2285, 0xf000003f, 0x00000007,
+	0x22c9, 0x3fff3fff, 0x00ffcfff,
+	0xc281, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x06000000,
+	0x136, 0x00000fff, 0x00000100,
+	0xf9e, 0x00000001, 0x00000002,
+	0x31da, 0x00000008, 0x00000008,
+	0x2300, 0x000000ff, 0x00000003,
+	0x853e, 0x01ff01ff, 0x00000002,
+	0x8526, 0x007ff800, 0x00200000,
+	0x8057, 0xffffffff, 0x00000f40,
+	0x2231, 0x001f3ae3, 0x00000082,
+	0x2235, 0x0000001f, 0x00000010,
+	0xc24d, 0xffffffff, 0x00000000
+};
+
+static const u32 kalindi_mgcg_cgcg_init[] =
+{
+	0x3108, 0xffffffff, 0xfffffffc,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf0a8, 0xffffffff, 0x00000100,
+	0xf082, 0xffffffff, 0x00000100,
+	0xf0b0, 0xffffffff, 0x00000100,
+	0xf0b2, 0xffffffff, 0x00000100,
+	0xf0b1, 0xffffffff, 0x00000100,
+	0x1579, 0xffffffff, 0x00600100,
+	0xf0a0, 0xffffffff, 0x00000100,
+	0xf085, 0xffffffff, 0x06000100,
+	0xf088, 0xffffffff, 0x00000100,
+	0xf086, 0xffffffff, 0x06000100,
+	0xf081, 0xffffffff, 0x00000100,
+	0xf0b8, 0xffffffff, 0x00000100,
+	0xf089, 0xffffffff, 0x00000100,
+	0xf080, 0xffffffff, 0x00000100,
+	0xf08c, 0xffffffff, 0x00000100,
+	0xf08d, 0xffffffff, 0x00000100,
+	0xf094, 0xffffffff, 0x00000100,
+	0xf095, 0xffffffff, 0x00000100,
+	0xf096, 0xffffffff, 0x00000100,
+	0xf097, 0xffffffff, 0x00000100,
+	0xf098, 0xffffffff, 0x00000100,
+	0xf09f, 0xffffffff, 0x00000100,
+	0xf09e, 0xffffffff, 0x00000100,
+	0xf084, 0xffffffff, 0x06000100,
+	0xf0a4, 0xffffffff, 0x00000100,
+	0xf09d, 0xffffffff, 0x00000100,
+	0xf0ad, 0xffffffff, 0x00000100,
+	0xf0ac, 0xffffffff, 0x00000100,
+	0xf09c, 0xffffffff, 0x00000100,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf008, 0xffffffff, 0x00010000,
+	0xf009, 0xffffffff, 0x00030002,
+	0xf00a, 0xffffffff, 0x00040007,
+	0xf00b, 0xffffffff, 0x00060005,
+	0xf00c, 0xffffffff, 0x00090008,
+	0xf00d, 0xffffffff, 0x00010000,
+	0xf00e, 0xffffffff, 0x00030002,
+	0xf00f, 0xffffffff, 0x00040007,
+	0xf010, 0xffffffff, 0x00060005,
+	0xf011, 0xffffffff, 0x00090008,
+	0xf000, 0xffffffff, 0x96e00200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x3109, 0xffffffff, 0x0020003f,
+	0xe, 0xffffffff, 0x0140001c,
+	0xf, 0x000f0000, 0x000f0000,
+	0x88, 0xffffffff, 0xc060000c,
+	0x89, 0xc0000fff, 0x00000100,
+	0x82a, 0xffffffff, 0x00000104,
+	0x1579, 0xff000fff, 0x00000100,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3403, 0xff000ff0, 0x00000100,
+	0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 hawaii_golden_spm_registers[] =
+{
+	0xc200, 0xe0ffffff, 0xe0000000
+};
+
+static const u32 hawaii_golden_common_registers[] =
+{
+	0xc200, 0xffffffff, 0xe0000000,
+	0xa0d4, 0xffffffff, 0x3a00161a,
+	0xa0d5, 0xffffffff, 0x0000002e,
+	0x2684, 0xffffffff, 0x00018208,
+	0x263e, 0xffffffff, 0x12011003
+};
+
+static const u32 hawaii_golden_registers[] =
+{
+	0xcd5, 0x00000333, 0x00000333,
+	0x2684, 0x00010000, 0x00058208,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260d, 0xf00fffff, 0x00000400,
+	0x260e, 0x0002021c, 0x00020200,
+	0x31e, 0x00000080, 0x00000000,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0xf0311fff, 0x80300000,
+	0xd43, 0x00810000, 0x408af000,
+	0x1c0c, 0x31000111, 0x00000011,
+	0xbd2, 0x73773777, 0x12010001,
+	0x848, 0x0000007f, 0x0000001b,
+	0x877, 0x00007fb6, 0x00002191,
+	0xd8a, 0x0000003f, 0x0000000a,
+	0xd8b, 0x0000003f, 0x0000000a,
+	0xab9, 0x00073ffe, 0x000022a2,
+	0x903, 0x000007ff, 0x00000000,
+	0x22fc, 0x00002001, 0x00000001,
+	0x22c9, 0xffffffff, 0x00ffffff,
+	0xc281, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x06000000,
+	0xf9e, 0x00000001, 0x00000002,
+	0x31da, 0x00000008, 0x00000008,
+	0x31dc, 0x00000f00, 0x00000800,
+	0x31dd, 0x00000f00, 0x00000800,
+	0x31e6, 0x00ffffff, 0x00ff7fbf,
+	0x31e7, 0x00ffffff, 0x00ff7faf,
+	0x2300, 0x000000ff, 0x00000800,
+	0x390, 0x00001fff, 0x00001fff,
+	0x2418, 0x0000007f, 0x00000020,
+	0x2542, 0x00010000, 0x00010000,
+	0x2b80, 0x00100000, 0x000ff07c,
+	0x2b05, 0x000003ff, 0x0000000f,
+	0x2b04, 0xffffffff, 0x7564fdec,
+	0x2b03, 0xffffffff, 0x3120b9a8,
+	0x2b02, 0x20000000, 0x0f9c0000
+};
+
+static const u32 hawaii_mgcg_cgcg_init[] =
+{
+	0x3108, 0xffffffff, 0xfffffffd,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf0a8, 0xffffffff, 0x00000100,
+	0xf082, 0xffffffff, 0x00000100,
+	0xf0b0, 0xffffffff, 0x00000100,
+	0xf0b2, 0xffffffff, 0x00000100,
+	0xf0b1, 0xffffffff, 0x00000100,
+	0x1579, 0xffffffff, 0x00200100,
+	0xf0a0, 0xffffffff, 0x00000100,
+	0xf085, 0xffffffff, 0x06000100,
+	0xf088, 0xffffffff, 0x00000100,
+	0xf086, 0xffffffff, 0x06000100,
+	0xf081, 0xffffffff, 0x00000100,
+	0xf0b8, 0xffffffff, 0x00000100,
+	0xf089, 0xffffffff, 0x00000100,
+	0xf080, 0xffffffff, 0x00000100,
+	0xf08c, 0xffffffff, 0x00000100,
+	0xf08d, 0xffffffff, 0x00000100,
+	0xf094, 0xffffffff, 0x00000100,
+	0xf095, 0xffffffff, 0x00000100,
+	0xf096, 0xffffffff, 0x00000100,
+	0xf097, 0xffffffff, 0x00000100,
+	0xf098, 0xffffffff, 0x00000100,
+	0xf09f, 0xffffffff, 0x00000100,
+	0xf09e, 0xffffffff, 0x00000100,
+	0xf084, 0xffffffff, 0x06000100,
+	0xf0a4, 0xffffffff, 0x00000100,
+	0xf09d, 0xffffffff, 0x00000100,
+	0xf0ad, 0xffffffff, 0x00000100,
+	0xf0ac, 0xffffffff, 0x00000100,
+	0xf09c, 0xffffffff, 0x00000100,
+	0xc200, 0xffffffff, 0xe0000000,
+	0xf008, 0xffffffff, 0x00010000,
+	0xf009, 0xffffffff, 0x00030002,
+	0xf00a, 0xffffffff, 0x00040007,
+	0xf00b, 0xffffffff, 0x00060005,
+	0xf00c, 0xffffffff, 0x00090008,
+	0xf00d, 0xffffffff, 0x00010000,
+	0xf00e, 0xffffffff, 0x00030002,
+	0xf00f, 0xffffffff, 0x00040007,
+	0xf010, 0xffffffff, 0x00060005,
+	0xf011, 0xffffffff, 0x00090008,
+	0xf012, 0xffffffff, 0x00010000,
+	0xf013, 0xffffffff, 0x00030002,
+	0xf014, 0xffffffff, 0x00040007,
+	0xf015, 0xffffffff, 0x00060005,
+	0xf016, 0xffffffff, 0x00090008,
+	0xf017, 0xffffffff, 0x00010000,
+	0xf018, 0xffffffff, 0x00030002,
+	0xf019, 0xffffffff, 0x00040007,
+	0xf01a, 0xffffffff, 0x00060005,
+	0xf01b, 0xffffffff, 0x00090008,
+	0xf01c, 0xffffffff, 0x00010000,
+	0xf01d, 0xffffffff, 0x00030002,
+	0xf01e, 0xffffffff, 0x00040007,
+	0xf01f, 0xffffffff, 0x00060005,
+	0xf020, 0xffffffff, 0x00090008,
+	0xf021, 0xffffffff, 0x00010000,
+	0xf022, 0xffffffff, 0x00030002,
+	0xf023, 0xffffffff, 0x00040007,
+	0xf024, 0xffffffff, 0x00060005,
+	0xf025, 0xffffffff, 0x00090008,
+	0xf026, 0xffffffff, 0x00010000,
+	0xf027, 0xffffffff, 0x00030002,
+	0xf028, 0xffffffff, 0x00040007,
+	0xf029, 0xffffffff, 0x00060005,
+	0xf02a, 0xffffffff, 0x00090008,
+	0xf02b, 0xffffffff, 0x00010000,
+	0xf02c, 0xffffffff, 0x00030002,
+	0xf02d, 0xffffffff, 0x00040007,
+	0xf02e, 0xffffffff, 0x00060005,
+	0xf02f, 0xffffffff, 0x00090008,
+	0xf030, 0xffffffff, 0x00010000,
+	0xf031, 0xffffffff, 0x00030002,
+	0xf032, 0xffffffff, 0x00040007,
+	0xf033, 0xffffffff, 0x00060005,
+	0xf034, 0xffffffff, 0x00090008,
+	0xf035, 0xffffffff, 0x00010000,
+	0xf036, 0xffffffff, 0x00030002,
+	0xf037, 0xffffffff, 0x00040007,
+	0xf038, 0xffffffff, 0x00060005,
+	0xf039, 0xffffffff, 0x00090008,
+	0xf03a, 0xffffffff, 0x00010000,
+	0xf03b, 0xffffffff, 0x00030002,
+	0xf03c, 0xffffffff, 0x00040007,
+	0xf03d, 0xffffffff, 0x00060005,
+	0xf03e, 0xffffffff, 0x00090008,
+	0x30c6, 0xffffffff, 0x00020200,
+	0xcd4, 0xffffffff, 0x00000200,
+	0x570, 0xffffffff, 0x00000400,
+	0x157a, 0xffffffff, 0x00000000,
+	0xbd4, 0xffffffff, 0x00000902,
+	0xf000, 0xffffffff, 0x96940200,
+	0x21c2, 0xffffffff, 0x00900100,
+	0x3109, 0xffffffff, 0x0020003f,
+	0xe, 0xffffffff, 0x0140001c,
+	0xf, 0x000f0000, 0x000f0000,
+	0x88, 0xffffffff, 0xc060000c,
+	0x89, 0xc0000fff, 0x00000100,
+	0x3e4, 0xffffffff, 0x00000100,
+	0x3e6, 0x00000101, 0x00000000,
+	0x82a, 0xffffffff, 0x00000104,
+	0x1579, 0xff000fff, 0x00000100,
+	0xc33, 0xc0000fff, 0x00000104,
+	0x3079, 0x00000001, 0x00000001,
+	0x3403, 0xff000ff0, 0x00000100,
+	0x3603, 0xff000ff0, 0x00000100
+};
+
+static const u32 godavari_golden_registers[] =
+{
+	0x1579, 0xff607fff, 0xfc000100,
+	0x1bb6, 0x00010101, 0x00010000,
+	0x260c, 0xffffffff, 0x00000000,
+	0x260c0, 0xf00fffff, 0x00000400,
+	0x184c, 0xffffffff, 0x00010000,
+	0x16ec, 0x000000f0, 0x00000070,
+	0x16f0, 0xf0311fff, 0x80300000,
+	0x263e, 0x73773777, 0x12010001,
+	0x263f, 0xffffffff, 0x00000010,
+	0x200c, 0x00001f0f, 0x0000100a,
+	0xbd2, 0x73773777, 0x12010001,
+	0x902, 0x000fffff, 0x000c007f,
+	0x2285, 0xf000003f, 0x00000007,
+	0x22c9, 0xffffffff, 0x00ff0fff,
+	0xc281, 0x0000ff0f, 0x00000000,
+	0xa293, 0x07ffffff, 0x06000000,
+	0x136, 0x00000fff, 0x00000100,
+	0x3405, 0x00010000, 0x00810001,
+	0x3605, 0x00010000, 0x00810001,
+	0xf9e, 0x00000001, 0x00000002,
+	0x31da, 0x00000008, 0x00000008,
+	0x31dc, 0x00000f00, 0x00000800,
+	0x31dd, 0x00000f00, 0x00000800,
+	0x31e6, 0x00ffffff, 0x00ff7fbf,
+	0x31e7, 0x00ffffff, 0x00ff7faf,
+	0x2300, 0x000000ff, 0x00000001,
+	0x853e, 0x01ff01ff, 0x00000002,
+	0x8526, 0x007ff800, 0x00200000,
+	0x8057, 0xffffffff, 0x00000f40,
+	0x2231, 0x001f3ae3, 0x00000082,
+	0x2235, 0x0000001f, 0x00000010,
+	0xc24d, 0xffffffff, 0x00000000
+};
+
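+/*
+ * The golden register tables above are flat arrays of {offset, and_mask,
+ * or_value} triplets.  amdgpu_program_register_sequence() consumes them
+ * three entries at a time and read-modify-writes each register: the
+ * and_mask bits are cleared and or_value is OR'd in (an and_mask of
+ * 0xffffffff simply writes or_value as-is).
+ */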
+static void cik_init_golden_registers(struct amdgpu_device *adev)
+{
+	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		amdgpu_program_register_sequence(adev,
+						 bonaire_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 bonaire_golden_registers,
+						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 bonaire_golden_common_registers,
+						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
+		amdgpu_program_register_sequence(adev,
+						 bonaire_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+		break;
+	case CHIP_KABINI:
+		amdgpu_program_register_sequence(adev,
+						 kalindi_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 kalindi_golden_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 kalindi_golden_common_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+		amdgpu_program_register_sequence(adev,
+						 kalindi_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		break;
+	case CHIP_MULLINS:
+		amdgpu_program_register_sequence(adev,
+						 kalindi_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 godavari_golden_registers,
+						 (const u32)ARRAY_SIZE(godavari_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 kalindi_golden_common_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+		amdgpu_program_register_sequence(adev,
+						 kalindi_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		break;
+	case CHIP_KAVERI:
+		amdgpu_program_register_sequence(adev,
+						 spectre_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 spectre_golden_registers,
+						 (const u32)ARRAY_SIZE(spectre_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 spectre_golden_common_registers,
+						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
+		amdgpu_program_register_sequence(adev,
+						 spectre_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+		break;
+	case CHIP_HAWAII:
+		amdgpu_program_register_sequence(adev,
+						 hawaii_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 hawaii_golden_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
+		amdgpu_program_register_sequence(adev,
+						 hawaii_golden_common_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
+		amdgpu_program_register_sequence(adev,
+						 hawaii_golden_spm_registers,
+						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/**
+ * cik_get_xclk - get the xclk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (CIK).
+ */
+static u32 cik_get_xclk(struct amdgpu_device *adev)
+{
+	u32 reference_clock = adev->clock.spll.reference_freq;
+
+	if (adev->flags & AMDGPU_IS_APU) {
+		if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
+			return reference_clock / 2;
+	} else {
+		if (RREG32_SMC(ixCG_CLKPIN_CNTL) & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
+			return reference_clock / 4;
+	}
+	return reference_clock;
+}
+
+/**
+ * cik_srbm_select - select specific register instances
+ *
+ * @adev: amdgpu_device pointer
+ * @me: selected ME (micro engine)
+ * @pipe: pipe
+ * @queue: queue
+ * @vmid: VMID
+ *
+ * Switches the currently active register instances.  Some
+ * registers are instanced per VMID, others are instanced per
+ * me/pipe/queue combination.
+ */
+void cik_srbm_select(struct amdgpu_device *adev,
+		     u32 me, u32 pipe, u32 queue, u32 vmid)
+{
+	u32 srbm_gfx_cntl =
+		(((pipe << SRBM_GFX_CNTL__PIPEID__SHIFT) & SRBM_GFX_CNTL__PIPEID_MASK)|
+		((me << SRBM_GFX_CNTL__MEID__SHIFT) & SRBM_GFX_CNTL__MEID_MASK)|
+		((vmid << SRBM_GFX_CNTL__VMID__SHIFT) & SRBM_GFX_CNTL__VMID_MASK)|
+		((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK));
+	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
+}
+
+static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+	uint32_t tmp;
+
+	tmp = RREG32(mmCONFIG_CNTL);
+	if (!state)
+		tmp |= CONFIG_CNTL__VGA_DIS_MASK;
+	else
+		tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
+	WREG32(mmCONFIG_CNTL, tmp);
+}
+
+static bool cik_read_disabled_bios(struct amdgpu_device *adev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control = 0;
+	u32 d2vga_control = 0;
+	u32 vga_render_control = 0;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(mmBUS_CNTL);
+	if (adev->mode_info.num_crtc) {
+		d1vga_control = RREG32(mmD1VGA_CONTROL);
+		d2vga_control = RREG32(mmD2VGA_CONTROL);
+		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+	}
+	rom_cntl = RREG32_SMC(ixROM_CNTL);
+
+	/* enable the rom */
+	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
+	if (adev->mode_info.num_crtc) {
+		/* Disable VGA mode */
+		WREG32(mmD1VGA_CONTROL,
+		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+		WREG32(mmD2VGA_CONTROL,
+		       (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+		WREG32(mmVGA_RENDER_CONTROL,
+		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
+	}
+	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
+
+	r = amdgpu_read_bios(adev);
+
+	/* restore regs */
+	WREG32(mmBUS_CNTL, bus_cntl);
+	if (adev->mode_info.num_crtc) {
+		WREG32(mmD1VGA_CONTROL, d1vga_control);
+		WREG32(mmD2VGA_CONTROL, d2vga_control);
+		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
+	}
+	WREG32_SMC(ixROM_CNTL, rom_cntl);
+	return r;
+}
+
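+/*
+ * Whitelist of registers userspace may read through the register read
+ * interface.  Each entry is {offset, untouched, grbm_indexed}: "untouched"
+ * registers always report 0, "grbm_indexed" registers are read via
+ * cik_read_indexed_register() so the requested SE/SH instance is selected
+ * first.
+ */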
+static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
+	{mmGRBM_STATUS, false},
+	{mmGB_ADDR_CONFIG, false},
+	{mmMC_ARB_RAMCFG, false},
+	{mmGB_TILE_MODE0, false},
+	{mmGB_TILE_MODE1, false},
+	{mmGB_TILE_MODE2, false},
+	{mmGB_TILE_MODE3, false},
+	{mmGB_TILE_MODE4, false},
+	{mmGB_TILE_MODE5, false},
+	{mmGB_TILE_MODE6, false},
+	{mmGB_TILE_MODE7, false},
+	{mmGB_TILE_MODE8, false},
+	{mmGB_TILE_MODE9, false},
+	{mmGB_TILE_MODE10, false},
+	{mmGB_TILE_MODE11, false},
+	{mmGB_TILE_MODE12, false},
+	{mmGB_TILE_MODE13, false},
+	{mmGB_TILE_MODE14, false},
+	{mmGB_TILE_MODE15, false},
+	{mmGB_TILE_MODE16, false},
+	{mmGB_TILE_MODE17, false},
+	{mmGB_TILE_MODE18, false},
+	{mmGB_TILE_MODE19, false},
+	{mmGB_TILE_MODE20, false},
+	{mmGB_TILE_MODE21, false},
+	{mmGB_TILE_MODE22, false},
+	{mmGB_TILE_MODE23, false},
+	{mmGB_TILE_MODE24, false},
+	{mmGB_TILE_MODE25, false},
+	{mmGB_TILE_MODE26, false},
+	{mmGB_TILE_MODE27, false},
+	{mmGB_TILE_MODE28, false},
+	{mmGB_TILE_MODE29, false},
+	{mmGB_TILE_MODE30, false},
+	{mmGB_TILE_MODE31, false},
+	{mmGB_MACROTILE_MODE0, false},
+	{mmGB_MACROTILE_MODE1, false},
+	{mmGB_MACROTILE_MODE2, false},
+	{mmGB_MACROTILE_MODE3, false},
+	{mmGB_MACROTILE_MODE4, false},
+	{mmGB_MACROTILE_MODE5, false},
+	{mmGB_MACROTILE_MODE6, false},
+	{mmGB_MACROTILE_MODE7, false},
+	{mmGB_MACROTILE_MODE8, false},
+	{mmGB_MACROTILE_MODE9, false},
+	{mmGB_MACROTILE_MODE10, false},
+	{mmGB_MACROTILE_MODE11, false},
+	{mmGB_MACROTILE_MODE12, false},
+	{mmGB_MACROTILE_MODE13, false},
+	{mmGB_MACROTILE_MODE14, false},
+	{mmGB_MACROTILE_MODE15, false},
+	{mmCC_RB_BACKEND_DISABLE, false, true},
+	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
+	{mmGB_BACKEND_MAP, false, false},
+	{mmPA_SC_RASTER_CONFIG, false, true},
+	{mmPA_SC_RASTER_CONFIG_1, false, true},
+};
+
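+/*
+ * Read a register that is banked per shader engine / shader array: steer
+ * GRBM_GFX_INDEX at the requested SE/SH around the read and restore the
+ * broadcast (0xffffffff) selection afterwards.
+ */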
+static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
+					  u32 se_num, u32 sh_num,
+					  u32 reg_offset)
+{
+	uint32_t val;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (se_num != 0xffffffff || sh_num != 0xffffffff)
+		gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+
+	val = RREG32(reg_offset);
+
+	if (se_num != 0xffffffff || sh_num != 0xffffffff)
+		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+	mutex_unlock(&adev->grbm_idx_mutex);
+	return val;
+}
+
+static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
+			     u32 sh_num, u32 reg_offset, u32 *value)
+{
+	uint32_t i;
+
+	*value = 0;
+	for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+		if (reg_offset != cik_allowed_read_registers[i].reg_offset)
+			continue;
+
+		if (!cik_allowed_read_registers[i].untouched)
+			*value = cik_allowed_read_registers[i].grbm_indexed ?
+				 cik_read_indexed_register(adev, se_num,
+							   sh_num, reg_offset) :
+				 RREG32(reg_offset);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void cik_print_gpu_status_regs(struct amdgpu_device *adev)
+{
+	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
+		RREG32(mmGRBM_STATUS));
+	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
+		RREG32(mmGRBM_STATUS2));
+	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+		RREG32(mmGRBM_STATUS_SE0));
+	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+		RREG32(mmGRBM_STATUS_SE1));
+	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
+		RREG32(mmGRBM_STATUS_SE2));
+	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
+		RREG32(mmGRBM_STATUS_SE3));
+	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(mmSRBM_STATUS));
+	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+		RREG32(mmSRBM_STATUS2));
+	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
+		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
+	dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
+		 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
+	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
+		 RREG32(mmCP_STALLED_STAT1));
+	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
+		 RREG32(mmCP_STALLED_STAT2));
+	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
+		 RREG32(mmCP_STALLED_STAT3));
+	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
+		 RREG32(mmCP_CPF_BUSY_STAT));
+	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
+		 RREG32(mmCP_CPF_STALLED_STAT1));
+	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
+	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
+	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
+		 RREG32(mmCP_CPC_STALLED_STAT1));
+	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
+}
+
+/**
+ * amdgpu_cik_gpu_check_soft_reset - check which blocks are busy
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check which blocks are busy and return the relevant reset
+ * mask to be used by cik_gpu_soft_reset().
+ * Returns a mask of the blocks to be reset.
+ */
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(mmGRBM_STATUS);
+	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
+		reset_mask |= AMDGPU_RESET_GFX;
+
+	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
+		reset_mask |= AMDGPU_RESET_CP;
+
+	/* GRBM_STATUS2 */
+	tmp = RREG32(mmGRBM_STATUS2);
+	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
+		reset_mask |= AMDGPU_RESET_RLC;
+
+	/* SDMA0_STATUS_REG */
+	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
+	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+		reset_mask |= AMDGPU_RESET_DMA;
+
+	/* SDMA1_STATUS_REG */
+	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
+	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+		reset_mask |= AMDGPU_RESET_DMA1;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(mmSRBM_STATUS2);
+	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
+		reset_mask |= AMDGPU_RESET_DMA;
+
+	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
+		reset_mask |= AMDGPU_RESET_DMA1;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(mmSRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		reset_mask |= AMDGPU_RESET_IH;
+
+	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
+		reset_mask |= AMDGPU_RESET_SEM;
+
+	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
+		reset_mask |= AMDGPU_RESET_GRBM;
+
+	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+		reset_mask |= AMDGPU_RESET_VMC;
+
+	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
+		reset_mask |= AMDGPU_RESET_MC;
+
+	if (amdgpu_display_is_display_hung(adev))
+		reset_mask |= AMDGPU_RESET_DISPLAY;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & AMDGPU_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~AMDGPU_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+/**
+ * cik_gpu_soft_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @reset_mask: mask of which blocks to reset
+ *
+ * Soft reset the blocks specified in @reset_mask.
+ */
+static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
+{
+	struct amdgpu_mode_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	cik_print_gpu_status_regs(adev);
+	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
+	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+	/* disable CG/PG */
+
+	/* stop the rlc */
+	gfx_v7_0_rlc_stop(adev);
+
+	/* Disable GFX parsing/prefetching */
+	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
+
+	/* Disable MEC parsing/prefetching */
+	WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
+
+	if (reset_mask & AMDGPU_RESET_DMA) {
+		/* sdma0 */
+		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+		tmp |= SDMA0_F32_CNTL__HALT_MASK;
+		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	}
+	if (reset_mask & AMDGPU_RESET_DMA1) {
+		/* sdma1 */
+		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+		tmp |= SDMA0_F32_CNTL__HALT_MASK;
+		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	}
+
+	gmc_v7_0_mc_stop(adev, &save);
+	if (amdgpu_asic_wait_for_mc_idle(adev)) {
+		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
+	}
+
+	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP))
+		grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
+			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
+
+	if (reset_mask & AMDGPU_RESET_CP) {
+		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
+
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
+	}
+
+	if (reset_mask & AMDGPU_RESET_DMA)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+	if (reset_mask & AMDGPU_RESET_DMA1)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+
+	if (reset_mask & AMDGPU_RESET_DISPLAY)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+	if (reset_mask & AMDGPU_RESET_RLC)
+		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
+
+	if (reset_mask & AMDGPU_RESET_SEM)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK;
+
+	if (reset_mask & AMDGPU_RESET_IH)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+	if (reset_mask & AMDGPU_RESET_GRBM)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
+
+	if (reset_mask & AMDGPU_RESET_VMC)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
+
+	if (!(adev->flags & AMDGPU_IS_APU)) {
+		if (reset_mask & AMDGPU_RESET_MC)
+			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmGRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(mmGRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmGRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	gmc_v7_0_mc_resume(adev, &save);
+	udelay(50);
+
+	cik_print_gpu_status_regs(adev);
+}
+
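+/*
+ * GMCON registers saved across a PCI config reset on APUs.
+ * kv_restore_regs_for_reset() replays the GMCON PGFSM programming sequence
+ * before writing the saved values back.
+ */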
+struct kv_reset_save_regs {
+	u32 gmcon_reng_execute;
+	u32 gmcon_misc;
+	u32 gmcon_misc3;
+};
+
+static void kv_save_regs_for_reset(struct amdgpu_device *adev,
+				   struct kv_reset_save_regs *save)
+{
+	save->gmcon_reng_execute = RREG32(mmGMCON_RENG_EXECUTE);
+	save->gmcon_misc = RREG32(mmGMCON_MISC);
+	save->gmcon_misc3 = RREG32(mmGMCON_MISC3);
+
+	WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute &
+		~GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK);
+	WREG32(mmGMCON_MISC, save->gmcon_misc &
+		~(GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK |
+			GMCON_MISC__STCTRL_STUTTER_EN_MASK));
+}
+
+static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
+				      struct kv_reset_save_regs *save)
+{
+	int i;
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0x200010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0x300010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x210000);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0xa00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x21003);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0xb00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x2b00);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0xc00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0xd00010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x420000);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0x100010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x120202);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0x500010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x3e3e36);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0x600010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x373f3e);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0x700010ff);
+
+	for (i = 0; i < 5; i++)
+		WREG32(mmGMCON_PGFSM_WRITE, 0);
+
+	WREG32(mmGMCON_PGFSM_WRITE, 0x3e1332);
+	WREG32(mmGMCON_PGFSM_CONFIG, 0xe00010ff);
+
+	WREG32(mmGMCON_MISC3, save->gmcon_misc3);
+	WREG32(mmGMCON_MISC, save->gmcon_misc);
+	WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
+}
+
+static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_mc_save save;
+	struct kv_reset_save_regs kv_save = { 0 };
+	u32 tmp, i;
+
+	dev_info(adev->dev, "GPU pci config reset\n");
+
+	/* disable dpm? */
+
+	/* disable cg/pg */
+
+	/* Disable GFX parsing/prefetching */
+	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
+		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
+
+	/* Disable MEC parsing/prefetching */
+	WREG32(mmCP_MEC_CNTL,
+			CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
+
+	/* sdma0 */
+	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+	tmp |= SDMA0_F32_CNTL__HALT_MASK;
+	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	/* sdma1 */
+	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+	tmp |= SDMA0_F32_CNTL__HALT_MASK;
+	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	/* XXX other engines? */
+
+	/* halt the rlc, disable cp internal ints */
+	gfx_v7_0_rlc_stop(adev);
+
+	udelay(50);
+
+	/* disable mem access */
+	gmc_v7_0_mc_stop(adev, &save);
+	if (amdgpu_asic_wait_for_mc_idle(adev)) {
+		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
+	}
+
+	if (adev->flags & AMDGPU_IS_APU)
+		kv_save_regs_for_reset(adev, &kv_save);
+
+	/* disable BM */
+	pci_clear_master(adev->pdev);
+	/* reset */
+	amdgpu_pci_config_reset(adev);
+
+	udelay(100);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+			break;
+		udelay(1);
+	}
+
+	/* does asic init need to be run first??? */
+	if (adev->flags & AMDGPU_IS_APU)
+		kv_restore_regs_for_reset(adev, &kv_save);
+}
+
+static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
+{
+	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+
+	if (hung)
+		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+	else
+		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+	WREG32(mmBIOS_SCRATCH_3, tmp);
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+	u32 reset_mask;
+
+	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+
+	if (reset_mask)
+		cik_set_bios_scratch_engine_hung(adev, true);
+
+	/* try soft reset */
+	cik_gpu_soft_reset(adev, reset_mask);
+
+	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+
+	/* try pci config reset */
+	if (reset_mask && amdgpu_hard_reset)
+		cik_gpu_pci_config_reset(adev);
+
+	reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+
+	if (!reset_mask)
+		cik_set_bios_scratch_engine_hung(adev, false);
+
+	return 0;
+}
+
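+/*
+ * Program one UVD clock (VCLK or DCLK): query the atombios for the post
+ * divider that yields the requested frequency, write it into cntl_reg and
+ * poll status_reg (up to 100 * 10ms) for the divider change to take effect.
+ */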
+static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
+			      u32 cntl_reg, u32 status_reg)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+	uint32_t tmp;
+
+	r = amdgpu_atombios_get_clock_dividers(adev,
+					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					       clock, false, &dividers);
+	if (r)
+		return r;
+
+	tmp = RREG32_SMC(cntl_reg);
+	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(cntl_reg, tmp);
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int cik_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+	int r = 0;
+
+	r = cik_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+	if (r)
+		return r;
+
+	r = cik_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+	return r;
+}
+
+static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+	u32 tmp;
+
+	r = amdgpu_atombios_get_clock_dividers(adev,
+					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+					       ecclk, false, &dividers);
+	if (r)
+		return r;
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
+	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
+		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
+	tmp |= dividers.post_divider;
+	WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
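+/*
+ * Bring the PCIe link up to gen2/gen3 speeds when both the root port and
+ * the GPU support it (and amdgpu.pcie_gen2 is not 0).  For gen3 the link
+ * equalization is retried by toggling LC_SET_QUIESCE/LC_REDO_EQ while
+ * mirroring the bridge's LNKCTL/LNKCTL2 settings, then the target link
+ * speed is set and a speed change is initiated via PCIE_LC_SPEED_CNTL.
+ */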
+static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+	struct pci_dev *root = adev->pdev->bus->self;
+	int bridge_pos, gpu_pos;
+	u32 speed_cntl, mask, current_data_rate;
+	int ret, i;
+	u16 tmp16;
+
+	if (amdgpu_pcie_gen2 == 0)
+		return;
+
+	if (adev->flags & AMDGPU_IS_APU)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+		return;
+
+	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
+	current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+	if (mask & DRM_PCIE_SPEED_80) {
+		if (current_data_rate == 2) {
+			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+	} else if (mask & DRM_PCIE_SPEED_50) {
+		if (current_data_rate == 1) {
+			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+			return;
+		}
+		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+	}
+
+	bridge_pos = pci_pcie_cap(root);
+	if (!bridge_pos)
+		return;
+
+	gpu_pos = pci_pcie_cap(adev->pdev);
+	if (!gpu_pos)
+		return;
+
+	if (mask & DRM_PCIE_SPEED_80) {
+		/* re-try equalization if gen3 is not already enabled */
+		if (current_data_rate != 2) {
+			u16 bridge_cfg, gpu_cfg;
+			u16 bridge_cfg2, gpu_cfg2;
+			u32 max_lw, current_lw, tmp;
+
+			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+			pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+			pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+			tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+			max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
+				PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+			current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK)
+				>> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
+
+			if (current_lw < max_lw) {
+				tmp = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+				if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+					tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK |
+						PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+					tmp |= (max_lw <<
+						PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+					tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK |
+					PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK |
+					PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+					WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
+				}
+			}
+
+			for (i = 0; i < 10; i++) {
+				/* check status */
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+					break;
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
+				tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+
+				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
+				tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+
+				mdelay(100);
+
+				/* linkctl */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+				/* linkctl2 */
+				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+				tmp16 &= ~((1 << 4) | (7 << 9));
+				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
+				tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+				WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
+			}
+		}
+	}
+
+	/* set the link speed */
+	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK |
+		PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+	speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
+
+	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+	tmp16 &= ~0xf;
+	if (mask & DRM_PCIE_SPEED_80)
+		tmp16 |= 3; /* gen3 */
+	else if (mask & DRM_PCIE_SPEED_50)
+		tmp16 |= 2; /* gen2 */
+	else
+		tmp16 |= 1; /* gen1 */
+	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
+	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
+		if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
+			break;
+		udelay(1);
+	}
+}
+
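+/*
+ * Enable PCIe ASPM unless disabled with the amdgpu.aspm module parameter
+ * (skipped on APUs): program the L0s/L1 inactivity timers, optionally power
+ * down the PIF PLLs while in L1, and allow CLKREQ#-based clocking when the
+ * root port advertises PCI_EXP_LNKCAP_CLKPM.
+ */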
+static void cik_program_aspm(struct amdgpu_device *adev)
+{
+	u32 data, orig;
+	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+	bool disable_clkreq = false;
+
+	if (amdgpu_aspm == 0)
+		return;
+
+	/* XXX double check APUs */
+	if (adev->flags & AMDGPU_IS_APU)
+		return;
+
+	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+	data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+	data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
+		PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
+	if (orig != data)
+		WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
+
+	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
+	data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
+	if (orig != data)
+		WREG32_PCIE(ixPCIE_LC_CNTL3, data);
+
+	orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+	data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
+	if (orig != data)
+		WREG32_PCIE(ixPCIE_P_CNTL, data);
+
+	orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+	data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK |
+		PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+	data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+	if (!disable_l0s)
+		data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
+
+	if (!disable_l1) {
+		data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+		data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+		if (orig != data)
+			WREG32_PCIE(ixPCIE_LC_CNTL, data);
+
+		if (!disable_plloff_in_l1) {
+			bool clk_req_support;
+
+			orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_0);
+			data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
+				PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
+				(7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
+			if (orig != data)
+				WREG32_PCIE(ixPB0_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_1);
+			data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
+				PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
+				(7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
+			if (orig != data)
+				WREG32_PCIE(ixPB0_PIF_PWRDOWN_1, data);
+
+			orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_0);
+			data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
+				PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+			data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
+				(7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
+			if (orig != data)
+				WREG32_PCIE(ixPB1_PIF_PWRDOWN_0, data);
+
+			orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_1);
+			data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
+				PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+			data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
+				(7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
+			if (orig != data)
+				WREG32_PCIE(ixPB1_PIF_PWRDOWN_1, data);
+
+			orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+			data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+			data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
+			if (orig != data)
+				WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
+
+			if (!disable_clkreq) {
+				struct pci_dev *root = adev->pdev->bus->self;
+				u32 lnkcap;
+
+				clk_req_support = false;
+				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+					clk_req_support = true;
+			} else {
+				clk_req_support = false;
+			}
+
+			if (clk_req_support) {
+				orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
+				data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+					PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+				if (orig != data)
+					WREG32_PCIE(ixPCIE_LC_CNTL2, data);
+
+				orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
+				data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK |
+					THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+				data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
+					(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
+				if (orig != data)
+					WREG32_SMC(ixTHM_CLK_CNTL, data);
+
+				orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
+				data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
+					MISC_CLK_CTRL__ZCLK_SEL_MASK);
+				data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
+					(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
+				if (orig != data)
+					WREG32_SMC(ixMISC_CLK_CTRL, data);
+
+				orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
+				data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
+				if (orig != data)
+					WREG32_SMC(ixCG_CLKPIN_CNTL, data);
+
+				orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+				data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
+				if (orig != data)
+					WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
+
+				orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
+				data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+				data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
+				if (orig != data)
+					WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
+			}
+		}
+	} else {
+		if (orig != data)
+			WREG32_PCIE(ixPCIE_LC_CNTL, data);
+	}
+
+	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+	data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+		PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+		PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
+	if (orig != data)
+		WREG32_PCIE(ixPCIE_CNTL2, data);
+
+	if (!disable_l0s) {
+		data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+		if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) ==
+				PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+			data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+			if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) &&
+			(data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+				orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+				data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+				if (orig != data)
+					WREG32_PCIE(ixPCIE_LC_CNTL, data);
+			}
+		}
+	}
+}
+
+static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
+{
+	return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &ci_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &dce_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
+static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &ci_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &dce_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
+static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &kv_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &dce_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
+static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &kv_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 3,
+		.rev = 0,
+		.funcs = &dce_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
+static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 1,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gmc_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &kv_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 8,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &gfx_v7_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &cik_sdma_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 4,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &uvd_v4_2_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v2_0_ip_funcs,
+	},
+};
+
+int cik_set_ip_blocks(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		adev->ip_blocks = bonaire_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+		break;
+	case CHIP_HAWAII:
+		adev->ip_blocks = hawaii_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+		break;
+	case CHIP_KAVERI:
+		adev->ip_blocks = kaveri_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+		break;
+	case CHIP_KABINI:
+		adev->ip_blocks = kabini_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+		break;
+	case CHIP_MULLINS:
+		adev->ip_blocks = mullins_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
+	if (adev->ip_block_enabled == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static const struct amdgpu_asic_funcs cik_asic_funcs =
+{
+	.read_disabled_bios = &cik_read_disabled_bios,
+	.read_register = &cik_read_register,
+	.reset = &cik_asic_reset,
+	.set_vga_state = &cik_vga_set_state,
+	.get_xclk = &cik_get_xclk,
+	.set_uvd_clocks = &cik_set_uvd_clocks,
+	.set_vce_clocks = &cik_set_vce_clocks,
+	.get_cu_info = &gfx_v7_0_get_cu_info,
+	/* these should be moved to their own ip modules */
+	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+	.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+};
+
+static int cik_common_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->smc_rreg = &cik_smc_rreg;
+	adev->smc_wreg = &cik_smc_wreg;
+	adev->pcie_rreg = &cik_pcie_rreg;
+	adev->pcie_wreg = &cik_pcie_wreg;
+	adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
+	adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg;
+	adev->didt_rreg = &cik_didt_rreg;
+	adev->didt_wreg = &cik_didt_wreg;
+
+	adev->asic_funcs = &cik_asic_funcs;
+
+	adev->has_uvd = true;
+
+	adev->rev_id = cik_get_rev_id(adev);
+	adev->external_rev_id = 0xFF;
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		adev->cg_flags =
+			AMDGPU_CG_SUPPORT_GFX_MGCG |
+			AMDGPU_CG_SUPPORT_GFX_MGLS |
+			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+			AMDGPU_CG_SUPPORT_GFX_CGLS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
+			AMDGPU_CG_SUPPORT_GFX_CP_LS |
+			AMDGPU_CG_SUPPORT_MC_LS |
+			AMDGPU_CG_SUPPORT_MC_MGCG |
+			AMDGPU_CG_SUPPORT_SDMA_MGCG |
+			AMDGPU_CG_SUPPORT_SDMA_LS |
+			AMDGPU_CG_SUPPORT_BIF_LS |
+			AMDGPU_CG_SUPPORT_VCE_MGCG |
+			AMDGPU_CG_SUPPORT_UVD_MGCG |
+			AMDGPU_CG_SUPPORT_HDP_LS |
+			AMDGPU_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		adev->external_rev_id = adev->rev_id + 0x14;
+		break;
+	case CHIP_HAWAII:
+		adev->cg_flags =
+			AMDGPU_CG_SUPPORT_GFX_MGCG |
+			AMDGPU_CG_SUPPORT_GFX_MGLS |
+			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+			AMDGPU_CG_SUPPORT_GFX_CGLS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS |
+			AMDGPU_CG_SUPPORT_GFX_CP_LS |
+			AMDGPU_CG_SUPPORT_MC_LS |
+			AMDGPU_CG_SUPPORT_MC_MGCG |
+			AMDGPU_CG_SUPPORT_SDMA_MGCG |
+			AMDGPU_CG_SUPPORT_SDMA_LS |
+			AMDGPU_CG_SUPPORT_BIF_LS |
+			AMDGPU_CG_SUPPORT_VCE_MGCG |
+			AMDGPU_CG_SUPPORT_UVD_MGCG |
+			AMDGPU_CG_SUPPORT_HDP_LS |
+			AMDGPU_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags = 0;
+		adev->external_rev_id = 0x28;
+		break;
+	case CHIP_KAVERI:
+		adev->cg_flags =
+			AMDGPU_CG_SUPPORT_GFX_MGCG |
+			AMDGPU_CG_SUPPORT_GFX_MGLS |
+			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+			AMDGPU_CG_SUPPORT_GFX_CGLS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
+			AMDGPU_CG_SUPPORT_GFX_CP_LS |
+			AMDGPU_CG_SUPPORT_SDMA_MGCG |
+			AMDGPU_CG_SUPPORT_SDMA_LS |
+			AMDGPU_CG_SUPPORT_BIF_LS |
+			AMDGPU_CG_SUPPORT_VCE_MGCG |
+			AMDGPU_CG_SUPPORT_UVD_MGCG |
+			AMDGPU_CG_SUPPORT_HDP_LS |
+			AMDGPU_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags =
+			/*AMDGPU_PG_SUPPORT_GFX_PG |
+			  AMDGPU_PG_SUPPORT_GFX_SMG |
+			  AMDGPU_PG_SUPPORT_GFX_DMG |*/
+			AMDGPU_PG_SUPPORT_UVD |
+			/*AMDGPU_PG_SUPPORT_VCE |
+			  AMDGPU_PG_SUPPORT_CP |
+			  AMDGPU_PG_SUPPORT_GDS |
+			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
+			  AMDGPU_PG_SUPPORT_ACP |
+			  AMDGPU_PG_SUPPORT_SAMU |*/
+			0;
+		if (adev->pdev->device == 0x1312 ||
+			adev->pdev->device == 0x1316 ||
+			adev->pdev->device == 0x1317)
+			adev->external_rev_id = 0x41;
+		else
+			adev->external_rev_id = 0x1;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		adev->cg_flags =
+			AMDGPU_CG_SUPPORT_GFX_MGCG |
+			AMDGPU_CG_SUPPORT_GFX_MGLS |
+			/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
+			AMDGPU_CG_SUPPORT_GFX_CGLS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS |
+			AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
+			AMDGPU_CG_SUPPORT_GFX_CP_LS |
+			AMDGPU_CG_SUPPORT_SDMA_MGCG |
+			AMDGPU_CG_SUPPORT_SDMA_LS |
+			AMDGPU_CG_SUPPORT_BIF_LS |
+			AMDGPU_CG_SUPPORT_VCE_MGCG |
+			AMDGPU_CG_SUPPORT_UVD_MGCG |
+			AMDGPU_CG_SUPPORT_HDP_LS |
+			AMDGPU_CG_SUPPORT_HDP_MGCG;
+		adev->pg_flags =
+			/*AMDGPU_PG_SUPPORT_GFX_PG |
+			  AMDGPU_PG_SUPPORT_GFX_SMG | */
+			AMDGPU_PG_SUPPORT_UVD |
+			/*AMDGPU_PG_SUPPORT_VCE |
+			  AMDGPU_PG_SUPPORT_CP |
+			  AMDGPU_PG_SUPPORT_GDS |
+			  AMDGPU_PG_SUPPORT_RLC_SMU_HS |
+			  AMDGPU_PG_SUPPORT_SAMU |*/
+			0;
+		if (adev->asic_type == CHIP_KABINI) {
+			if (adev->rev_id == 0)
+				adev->external_rev_id = 0x81;
+			else if (adev->rev_id == 1)
+				adev->external_rev_id = 0x82;
+			else if (adev->rev_id == 2)
+				adev->external_rev_id = 0x85;
+		} else {
+			adev->external_rev_id = adev->rev_id + 0xa1;
+		}
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cik_common_sw_init(void *handle)
+{
+	return 0;
+}
+
+static int cik_common_sw_fini(void *handle)
+{
+	return 0;
+}
+
+static int cik_common_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* move the golden regs per IP block */
+	cik_init_golden_registers(adev);
+	/* enable pcie gen2/3 link */
+	cik_pcie_gen3_enable(adev);
+	/* enable aspm */
+	cik_program_aspm(adev);
+
+	return 0;
+}
+
+static int cik_common_hw_fini(void *handle)
+{
+	return 0;
+}
+
+static int cik_common_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cik_common_hw_fini(adev);
+}
+
+static int cik_common_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cik_common_hw_init(adev);
+}
+
+static bool cik_common_is_idle(void *handle)
+{
+	return true;
+}
+
+static int cik_common_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static void cik_common_print_status(void *handle)
+{
+
+}
+
+static int cik_common_soft_reset(void *handle)
+{
+	/* XXX hard reset?? */
+	return 0;
+}
+
+static int cik_common_set_clockgating_state(void *handle,
+					    enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int cik_common_set_powergating_state(void *handle,
+					    enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs cik_common_ip_funcs = {
+	.early_init = cik_common_early_init,
+	.late_init = NULL,
+	.sw_init = cik_common_sw_init,
+	.sw_fini = cik_common_sw_fini,
+	.hw_init = cik_common_hw_init,
+	.hw_fini = cik_common_hw_fini,
+	.suspend = cik_common_suspend,
+	.resume = cik_common_resume,
+	.is_idle = cik_common_is_idle,
+	.wait_for_idle = cik_common_wait_for_idle,
+	.soft_reset = cik_common_soft_reset,
+	.print_status = cik_common_print_status,
+	.set_clockgating_state = cik_common_set_clockgating_state,
+	.set_powergating_state = cik_common_set_powergating_state,
+};

+ 33 - 0
drivers/gpu/drm/amd/amdgpu/cik.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_H__
+#define __CIK_H__
+
+extern const struct amd_ip_funcs cik_common_ip_funcs;
+
+void cik_srbm_select(struct amdgpu_device *adev,
+		     u32 me, u32 pipe, u32 queue, u32 vmid);
+int cik_set_ip_blocks(struct amdgpu_device *adev);
+
+#endif

+ 30 - 0
drivers/gpu/drm/amd/amdgpu/cik_dpm.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_DPM_H__
+#define __CIK_DPM_H__
+
+extern const struct amd_ip_funcs ci_dpm_ip_funcs;
+extern const struct amd_ip_funcs kv_dpm_ip_funcs;
+
+#endif

+ 471 - 0
drivers/gpu/drm/amd/amdgpu/cik_ih.c

@@ -0,0 +1,471 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "cikd.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written.  When the
+ * pointers are equal, the ring is idle.  When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr.  When there is an interrupt, the host then starts
+ * fetching commands and processing them until the pointers are
+ * equal again at which point it updates the rptr.
+ */
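+
+/*
+ * Illustrative sketch only (an assumption-level view, not part of the hw
+ * programming sequence): with the rptr/wptr semantics described above,
+ * "work pending" on the IH ring reduces to a pointer comparison, roughly:
+ *
+ *	wptr = cik_ih_get_wptr(adev);
+ *	while (adev->irq.ih.rptr != wptr) {
+ *		cik_ih_decode_iv(adev, &entry);	 (also advances rptr)
+ *		... dispatch entry.src_id / entry.src_data ...
+ *	}
+ *	cik_ih_set_rptr(adev);
+ *
+ * The real dispatch loop lives in the common amdgpu IRQ/IH code; the hooks
+ * below only implement the per-ASIC pieces of it.
+ */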
+
+static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * cik_ih_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the interrupt ring buffer (CIK).
+ */
+static void cik_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_cntl = RREG32(mmIH_CNTL);
+	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+	ih_cntl |= IH_CNTL__ENABLE_INTR_MASK;
+	ih_rb_cntl |= IH_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(mmIH_CNTL, ih_cntl);
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+	adev->irq.ih.enabled = true;
+}
+
+/**
+ * cik_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (CIK).
+ */
+static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+	u32 ih_cntl = RREG32(mmIH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_CNTL__RB_ENABLE_MASK;
+	ih_cntl &= ~IH_CNTL__ENABLE_INTR_MASK;
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+	WREG32(mmIH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(mmIH_RB_RPTR, 0);
+	WREG32(mmIH_RB_WPTR, 0);
+	adev->irq.ih.enabled = false;
+	adev->irq.ih.rptr = 0;
+}
+
+/**
+ * cik_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * disable interrupts, configure the IH ring buffer
+ * and enable it (CIK).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int cik_ih_irq_init(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+	u64 wptr_off;
+
+	/* disable irqs */
+	cik_ih_disable_interrupts(adev);
+
+	/* setup interrupt control */
+	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK;
+	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK;
+	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
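+	/* the ring size is programmed as its log2 in dwords; the shift
+	 * below places it in the ring-size field of IH_RB_CNTL
+	 */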
+	ih_rb_cntl = (IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK |
+		      IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK |
+		      (rb_bufsz << 1));
+
+	ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;
+
+	/* set the writeback address whether it's enabled or not */
+	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(mmIH_RB_RPTR, 0);
+	WREG32(mmIH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = (0x10 << IH_CNTL__MC_WRREQ_CREDIT__SHIFT) |
+		(0x10 << IH_CNTL__MC_WR_CLEAN_CNT__SHIFT) |
+		(0 << IH_CNTL__MC_VMID__SHIFT);
+	/* IH_CNTL__RPTR_REARM_MASK only works if msi's are enabled */
+	if (adev->irq.msi_enabled)
+		ih_cntl |= IH_CNTL__RPTR_REARM_MASK;
+	WREG32(mmIH_CNTL, ih_cntl);
+
+	pci_set_master(adev->pdev);
+
+	/* enable irqs */
+	cik_ih_enable_interrupts(adev);
+
+	return ret;
+}
+
+/**
+ * cik_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (CIK).
+ */
+static void cik_ih_irq_disable(struct amdgpu_device *adev)
+{
+	cik_ih_disable_interrupts(adev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+}
+
+/**
+ * cik_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (CIK).  Also check for
+ * ring buffer overflow and deal with it.
+ * Used by cik_irq_process().
+ * Returns the value of the wptr.
+ */
+static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
+{
+	u32 wptr, tmp;
+
+	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+		tmp = RREG32(mmIH_RB_CNTL);
+		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+		WREG32(mmIH_RB_CNTL, tmp);
+	}
+	return (wptr & adev->irq.ih.ptr_mask);
+}
+
+/* CIK IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [63:60]  - reserved
+ * [71:64]  - RINGID
+ *            CP:
+ *            ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
+ *            QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
+ *                     - for gfx, hw shader state (0=PS...5=LS, 6=CS)
+ *            ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
+ *            PIPE_ID - ME0 0=3D
+ *                    - ME1&2 compute dispatcher (4 pipes each)
+ *            SDMA:
+ *            INSTANCE_ID [1:0], QUEUE_ID[1:0]
+ *            INSTANCE_ID - 0 = sdma0, 1 = sdma1
+ *            QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
+ * [79:72]  - VMID
+ * [95:80]  - PASID
+ * [127:96] - reserved
+ */
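+
+/*
+ * Worked example with hypothetical values: for a raw entry of
+ *	dw[0] = 0x000000B5, dw[1] = 0x00000000, dw[2] = 0x00050308
+ * the layout above decodes to src_id 0xB5, src_data 0, ring_id 0x08,
+ * vm_id 0x03 and pas_id 0x0005; these are exactly the fields extracted
+ * from the dwords in cik_ih_decode_iv() below.
+ */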
+
+/**
+ * cik_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: IV entry to fill in
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void cik_ih_decode_iv(struct amdgpu_device *adev,
+			     struct amdgpu_iv_entry *entry)
+{
+	/* wptr/rptr are in bytes! */
+	u32 ring_index = adev->irq.ih.rptr >> 2;
+	uint32_t dw[4];
+
+	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+	entry->src_id = dw[0] & 0xff;
+	entry->src_data = dw[1] & 0xfffffff;
+	entry->ring_id = dw[2] & 0xff;
+	entry->vm_id = (dw[2] >> 8) & 0xff;
+	entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+	/* wptr/rptr are in bytes! */
+	adev->irq.ih.rptr += 16;
+}
+
+/**
+ * cik_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void cik_ih_set_rptr(struct amdgpu_device *adev)
+{
+	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int cik_ih_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cik_ih_set_interrupt_funcs(adev);
+
+	return 0;
+}
+
+static int cik_ih_sw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_init(adev);
+
+	return r;
+}
+
+static int cik_ih_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev);
+
+	return 0;
+}
+
+static int cik_ih_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = cik_ih_irq_init(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int cik_ih_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cik_ih_irq_disable(adev);
+
+	return 0;
+}
+
+static int cik_ih_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cik_ih_hw_fini(adev);
+}
+
+static int cik_ih_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cik_ih_hw_init(adev);
+}
+
+static bool cik_ih_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(mmSRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		return false;
+
+	return true;
+}
+
+static int cik_ih_wait_for_idle(void *handle)
+{
+	unsigned i;
+	u32 tmp;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		/* read SRBM_STATUS */
+		tmp = RREG32(mmSRBM_STATUS) & SRBM_STATUS__IH_BUSY_MASK;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static void cik_ih_print_status(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dev_info(adev->dev, "CIK IH registers\n");
+	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(mmSRBM_STATUS));
+	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+		RREG32(mmSRBM_STATUS2));
+	dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
+		 RREG32(mmINTERRUPT_CNTL));
+	dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
+		 RREG32(mmINTERRUPT_CNTL2));
+	dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
+		 RREG32(mmIH_CNTL));
+	dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
+		 RREG32(mmIH_RB_CNTL));
+	dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
+		 RREG32(mmIH_RB_BASE));
+	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
+		 RREG32(mmIH_RB_WPTR_ADDR_LO));
+	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
+		 RREG32(mmIH_RB_WPTR_ADDR_HI));
+	dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
+		 RREG32(mmIH_RB_RPTR));
+	dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
+		 RREG32(mmIH_RB_WPTR));
+}
+
+static int cik_ih_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	u32 srbm_soft_reset = 0;
+	u32 tmp = RREG32(mmSRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+	if (srbm_soft_reset) {
+		cik_ih_print_status((void *)adev);
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+
+		cik_ih_print_status((void *)adev);
+	}
+
+	return 0;
+}
+
+static int cik_ih_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int cik_ih_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs cik_ih_ip_funcs = {
+	.early_init = cik_ih_early_init,
+	.late_init = NULL,
+	.sw_init = cik_ih_sw_init,
+	.sw_fini = cik_ih_sw_fini,
+	.hw_init = cik_ih_hw_init,
+	.hw_fini = cik_ih_hw_fini,
+	.suspend = cik_ih_suspend,
+	.resume = cik_ih_resume,
+	.is_idle = cik_ih_is_idle,
+	.wait_for_idle = cik_ih_wait_for_idle,
+	.soft_reset = cik_ih_soft_reset,
+	.print_status = cik_ih_print_status,
+	.set_clockgating_state = cik_ih_set_clockgating_state,
+	.set_powergating_state = cik_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs cik_ih_funcs = {
+	.get_wptr = cik_ih_get_wptr,
+	.decode_iv = cik_ih_decode_iv,
+	.set_rptr = cik_ih_set_rptr
+};
+
+static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+	if (adev->irq.ih_funcs == NULL)
+		adev->irq.ih_funcs = &cik_ih_funcs;
+}

+ 29 - 0
drivers/gpu/drm/amd/amdgpu/cik_ih.h

@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_IH_H__
+#define __CIK_IH_H__
+
+extern const struct amd_ip_funcs cik_ih_ip_funcs;
+
+#endif

+ 1405 - 0
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -0,0 +1,1405 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_trace.h"
+#include "cikd.h"
+#include "cik.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+	SDMA0_REGISTER_OFFSET,
+	SDMA1_REGISTER_OFFSET
+};
+
+static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
+static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
+static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
+static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines.  These engines are used for compute
+ * and gfx.  There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things.  It also has support for tiling/detiling of
+ * buffers.
+ */
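+
+/*
+ * Note on packet encoding (assumption-level summary, see cikd.h for the
+ * authoritative definitions): the SDMA_PACKET() macro used throughout this
+ * file builds the 32-bit packet header, packing the opcode into the low
+ * byte, the sub-opcode into the next byte and packet-specific extra bits
+ * into the upper half.  For example,
+ * SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0) forms the
+ * header of the "write linear data" packet used, e.g., by the ring and IB
+ * tests below.
+ */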
+
+/**
+ * cik_sdma_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver (not loaded into hw).
+ * Returns 0 on success, error on failure.
+ */
+static int cik_sdma_init_microcode(struct amdgpu_device *adev)
+{
+	const char *chip_name;
+	char fw_name[30];
+	int err, i;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		chip_name = "bonaire";
+		break;
+	case CHIP_HAWAII:
+		chip_name = "hawaii";
+		break;
+	case CHIP_KAVERI:
+		chip_name = "kaveri";
+		break;
+	case CHIP_KABINI:
+		chip_name = "kabini";
+		break;
+	case CHIP_MULLINS:
+		chip_name = "mullins";
+		break;
+	default:
+		BUG();
+	}
+
+	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+		if (i == 0)
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+		else
+			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		if (err)
+			goto out;
+		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+	}
+out:
+	if (err) {
+		printk(KERN_ERR
+		       "cik_sdma: Failed to load firmware \"%s\"\n",
+		       fw_name);
+		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+			release_firmware(adev->sdma[i].fw);
+			adev->sdma[i].fw = NULL;
+		}
+	}
+	return err;
+}
+
+/**
+ * cik_sdma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (CIK+).
+ */
+static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+	u32 rptr;
+
+	rptr = ring->adev->wb.wb[ring->rptr_offs];
+
+	return (rptr & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (CIK+).
+ */
+static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+
+	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+/**
+ * cik_sdma_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (CIK+).
+ */
+static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+
+	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+/**
+ * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
+ *
+ * @ring: amdgpu ring pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (CIK).
+ */
+static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+			   struct amdgpu_ib *ib)
+{
+	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 next_rptr = ring->wptr + 5;
+
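+	/* predict where wptr will point once the NOP padding and the 4 DW
+	 * INDIRECT_BUFFER packet below have been emitted; this value is
+	 * written to next_rptr_gpu_addr by the WRITE packet that follows
+	 */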
+	while ((next_rptr & 7) != 4)
+		next_rptr++;
+
+	next_rptr += 4;
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
+	amdgpu_ring_write(ring, next_rptr);
+
+	/* IB packet must end on an 8 DW boundary */
+	while ((ring->wptr & 7) != 4)
+		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
+	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
+	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+	amdgpu_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit an hdp flush packet on the requested DMA ring.
+ */
+static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+	u32 ref_and_mask;
+
+	if (ring == &ring->adev->sdma[0].ring)
+		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
+	else
+		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
+
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
+	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
+	amdgpu_ring_write(ring, ref_and_mask); /* reference */
+	amdgpu_ring_write(ring, ref_and_mask); /* mask */
+	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+/**
+ * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address to write the fence sequence number to
+ * @seq: fence sequence number
+ * @write64bit: write the upper 32 bits of the sequence number as well
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (CIK).
+ */
+static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+				     bool write64bit)
+{
+	/* write the fence */
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+	amdgpu_ring_write(ring, lower_32_bits(addr));
+	amdgpu_ring_write(ring, upper_32_bits(addr));
+	amdgpu_ring_write(ring, lower_32_bits(seq));
+
+	/* optionally write high bits as well */
+	if (write64bit) {
+		addr += 4;
+		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+		amdgpu_ring_write(ring, lower_32_bits(addr));
+		amdgpu_ring_write(ring, upper_32_bits(addr));
+		amdgpu_ring_write(ring, upper_32_bits(seq));
+	}
+
+	/* generate an interrupt */
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
+}
+
+/**
+ * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @semaphore: amdgpu semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (CIK).
+ */
+static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
+					 struct amdgpu_semaphore *semaphore,
+					 bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
+
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
+	amdgpu_ring_write(ring, addr & 0xfffffff8);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+	return true;
+}
+
+/**
+ * cik_sdma_gfx_stop - stop the gfx async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the gfx async dma ring buffers (CIK).
+ */
+static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	u32 rb_cntl;
+	int i;
+
+	if ((adev->mman.buffer_funcs_ring == sdma0) ||
+	    (adev->mman.buffer_funcs_ring == sdma1))
+		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+
+	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
+		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
+		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
+	}
+	sdma0->ready = false;
+	sdma1->ready = false;
+}
+
+/**
+ * cik_sdma_rlc_stop - stop the compute async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the compute async dma queues (CIK).
+ */
+static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
+{
+	/* XXX todo */
+}
+
+/**
+ * cik_sdma_enable - halt or unhalt the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
+{
+	u32 me_cntl;
+	int i;
+
+	if (!enable) {
+		cik_sdma_gfx_stop(adev);
+		cik_sdma_rlc_stop(adev);
+	}
+
+	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
+		if (enable)
+			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
+		else
+			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
+		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
+	}
+}
+
+/**
+ * cik_sdma_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	u32 rb_cntl, ib_cntl;
+	u32 rb_bufsz;
+	u32 wb_offset;
+	int i, j, r;
+
+	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+		ring = &adev->sdma[i].ring;
+		wb_offset = (ring->rptr_offs * 4);
+
+		mutex_lock(&adev->srbm_mutex);
+		for (j = 0; j < 16; j++) {
+			cik_srbm_select(adev, 0, 0, 0, j);
+			/* SDMA GFX */
+			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
+			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
+			/* XXX SDMA RLC - todo */
+		}
+		cik_srbm_select(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+
+		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = order_base_2(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
+		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
+		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
+		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
+
+		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
+
+		ring->wptr = 0;
+		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+
+		/* enable DMA RB */
+		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
+		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);
+
+		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
+#endif
+		/* enable DMA IBs */
+		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+		ring->ready = true;
+
+		r = amdgpu_ring_test_ring(ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+
+		if (adev->mman.buffer_funcs_ring == ring)
+			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+	}
+
+	return 0;
+}
+
+/**
+ * cik_sdma_rlc_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the compute DMA queues and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
+{
+	/* XXX todo */
+	return 0;
+}
+
+/**
+ * cik_sdma_load_microcode - load the sDMA ME ucode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_sdma_load_microcode(struct amdgpu_device *adev)
+{
+	const struct sdma_firmware_header_v1_0 *hdr;
+	const __le32 *fw_data;
+	u32 fw_size;
+	int i, j;
+
+	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
+		return -EINVAL;
+
+	/* halt the MEs */
+	cik_sdma_enable(adev, false);
+
+	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+		amdgpu_ucode_print_sdma_hdr(&hdr->header);
+		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		fw_data = (const __le32 *)
+			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+		for (j = 0; j < fw_size; j++)
+			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+	}
+
+	return 0;
+}
+
+/**
+ * cik_sdma_start - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the DMA engines and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_start(struct amdgpu_device *adev)
+{
+	int r;
+
+	r = cik_sdma_load_microcode(adev);
+	if (r)
+		return r;
+
+	/* unhalt the MEs */
+	cik_sdma_enable(adev, true);
+
+	/* start the gfx rings and rlc compute queues */
+	r = cik_sdma_gfx_resume(adev);
+	if (r)
+		return r;
+	r = cik_sdma_rlc_resume(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+/**
+ * cik_sdma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a known
+ * value to memory (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned i;
+	unsigned index;
+	int r;
+	u32 tmp;
+	u64 gpu_addr;
+
+	r = amdgpu_wb_get(adev, &index);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+		return r;
+	}
+
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	tmp = 0xCAFEDEAD;
+	adev->wb.wb[index] = cpu_to_le32(tmp);
+
+	r = amdgpu_ring_lock(ring, 5);
+	if (r) {
+		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		amdgpu_wb_free(adev, index);
+		return r;
+	}
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
+	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_unlock_commit(ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = le32_to_cpu(adev->wb.wb[index]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_wb_free(adev, index);
+
+	return r;
+}
+
+/**
+ * cik_sdma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (CIK).
+ * Returns 0 on success, error on failure.
+ */
+static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_ib ib;
+	unsigned i;
+	unsigned index;
+	int r;
+	u32 tmp = 0;
+	u64 gpu_addr;
+
+	r = amdgpu_wb_get(adev, &index);
+	if (r) {
+		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+		return r;
+	}
+
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	tmp = 0xCAFEDEAD;
+	adev->wb.wb[index] = cpu_to_le32(tmp);
+
+	r = amdgpu_ib_get(ring, NULL, 256, &ib);
+	if (r) {
+		amdgpu_wb_free(adev, index);
+		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+	ib.ptr[1] = lower_32_bits(gpu_addr);
+	ib.ptr[2] = upper_32_bits(gpu_addr);
+	ib.ptr[3] = 1;
+	ib.ptr[4] = 0xDEADBEEF;
+	ib.length_dw = 5;
+
+	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	if (r) {
+		amdgpu_ib_free(adev, &ib);
+		amdgpu_wb_free(adev, index);
+		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = amdgpu_fence_wait(ib.fence, false);
+	if (r) {
+		amdgpu_ib_free(adev, &ib);
+		amdgpu_wb_free(adev, index);
+		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = le32_to_cpu(adev->wb.wb[index]);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < adev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
+			 ib.fence->ring->idx, i);
+	} else {
+		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	amdgpu_ib_free(adev, &ib);
+	amdgpu_wb_free(adev, index);
+	return r;
+}
+
+/**
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
+				 uint64_t pe, uint64_t src,
+				 unsigned count)
+{
+	while (count) {
+		unsigned bytes = count * 8;
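+		/* a single COPY packet moves at most 0x1FFFF8 bytes,
+		 * i.e. 0x3FFFF page table entries
+		 */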
+		if (bytes > 0x1FFFF8)
+			bytes = 0x1FFFF8;
+
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = bytes;
+		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(src);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+		pe += bytes;
+		src += bytes;
+		count -= bytes / 8;
+	}
+}
+
+/**
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
+				  uint64_t pe,
+				  uint64_t addr, unsigned count,
+				  uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = ndw;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & AMDGPU_PTE_SYSTEM) {
+				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & AMDGPU_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
+			ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		}
+	}
+}
+
+/**
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
+				    uint64_t pe,
+				    uint64_t addr, unsigned count,
+				    uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count;
+		if (ndw > 0x7FFFF)
+			ndw = 0x7FFFF;
+
+		if (flags & AMDGPU_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+		pe += ndw * 8;
+		addr += ndw * incr;
+		count -= ndw;
+	}
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ * Pad the IB with NOP packets to a multiple of 8 DWs (CIK).
+ */
+static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
+{
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
+
+/**
+ * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VMID to flush
+ * @pd_addr: GPU address of the page directory
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (CIK).
+ */
+static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					unsigned vm_id, uint64_t pd_addr)
+{
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
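+	/* update the page directory base; VMIDs 0-7 and 8-15 use two
+	 * separate banks of PAGE_TABLE_BASE_ADDR registers
+	 */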
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	if (vm_id < 8) {
+		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+	} else {
+		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+	}
+	amdgpu_ring_write(ring, pd_addr >> 12);
+
+	/* flush TLB */
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
+	amdgpu_ring_write(ring, 1 << vm_id);
+
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0); /* reference */
+	amdgpu_ring_write(ring, 0); /* mask */
+	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
+}
+
+static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
+				 bool enable)
+{
+	u32 orig, data;
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
+		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
+		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
+	} else {
+		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
+		data |= 0xff000000;
+		if (data != orig)
+			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
+
+		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
+		data |= 0xff000000;
+		if (data != orig)
+			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
+	}
+}
+
+static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
+				 bool enable)
+{
+	u32 orig, data;
+
+	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
+		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+		data |= 0x100;
+		if (orig != data)
+			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+		data |= 0x100;
+		if (orig != data)
+			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+	} else {
+		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+		data &= ~0x100;
+		if (orig != data)
+			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+		data &= ~0x100;
+		if (orig != data)
+			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+	}
+}
+
+static int cik_sdma_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cik_sdma_set_ring_funcs(adev);
+	cik_sdma_set_irq_funcs(adev);
+	cik_sdma_set_buffer_funcs(adev);
+	cik_sdma_set_vm_pte_funcs(adev);
+
+	return 0;
+}
+
+static int cik_sdma_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = cik_sdma_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load sdma firmware!\n");
+		return r;
+	}
+
+	/* SDMA trap event */
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+	if (r)
+		return r;
+
+	/* SDMA Privileged inst */
+	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+	if (r)
+		return r;
+
+	/* SDMA Privileged inst */
+	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+	if (r)
+		return r;
+
+	ring = &adev->sdma[0].ring;
+	ring->ring_obj = NULL;
+
+	ring = &adev->sdma[1].ring;
+	ring->ring_obj = NULL;
+
+	ring = &adev->sdma[0].ring;
+	sprintf(ring->name, "sdma0");
+	r = amdgpu_ring_init(adev, ring, 256 * 1024,
+			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
+			     AMDGPU_RING_TYPE_SDMA);
+	if (r)
+		return r;
+
+	ring = &adev->sdma[1].ring;
+	sprintf(ring->name, "sdma1");
+	r = amdgpu_ring_init(adev, ring, 256 * 1024,
+			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
+			     AMDGPU_RING_TYPE_SDMA);
+	if (r)
+		return r;
+
+	return r;
+}
+
+static int cik_sdma_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_ring_fini(&adev->sdma[0].ring);
+	amdgpu_ring_fini(&adev->sdma[1].ring);
+
+	return 0;
+}
+
+static int cik_sdma_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = cik_sdma_start(adev);
+	if (r)
+		return r;
+
+	return r;
+}
+
+static int cik_sdma_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cik_sdma_enable(adev, false);
+
+	return 0;
+}
+
+static int cik_sdma_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cik_sdma_hw_fini(adev);
+}
+
+static int cik_sdma_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cik_sdma_hw_init(adev);
+}
+
+static bool cik_sdma_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(mmSRBM_STATUS2);
+
+	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+				SRBM_STATUS2__SDMA1_BUSY_MASK))
+		return false;
+
+	return true;
+}
+
+static int cik_sdma_wait_for_idle(void *handle)
+{
+	unsigned i;
+	u32 tmp;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
+				SRBM_STATUS2__SDMA1_BUSY_MASK);
+
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static void cik_sdma_print_status(void *handle)
+{
+	int i, j;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dev_info(adev->dev, "CIK SDMA registers\n");
+	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+		 RREG32(mmSRBM_STATUS2));
+	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
+			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
+			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
+			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
+			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
+			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
+		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
+			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+		mutex_lock(&adev->srbm_mutex);
+		for (j = 0; j < 16; j++) {
+			cik_srbm_select(adev, 0, 0, 0, j);
+			dev_info(adev->dev, "  VM %d:\n", j);
+			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
+				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
+			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
+				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
+		}
+		cik_srbm_select(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+	}
+}
+
+static int cik_sdma_soft_reset(void *handle)
+{
+	u32 srbm_soft_reset = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(mmSRBM_STATUS2);
+
+	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
+		/* sdma0 */
+		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+		tmp |= SDMA0_F32_CNTL__HALT_MASK;
+		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+	}
+	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
+		/* sdma1 */
+		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+		tmp |= SDMA0_F32_CNTL__HALT_MASK;
+		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+	}
+
+	if (srbm_soft_reset) {
+		cik_sdma_print_status((void *)adev);
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+
+		cik_sdma_print_status((void *)adev);
+	}
+
+	return 0;
+}
+
+static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
+				       struct amdgpu_irq_src *src,
+				       unsigned type,
+				       enum amdgpu_interrupt_state state)
+{
+	u32 sdma_cntl;
+
+	switch (type) {
+	case AMDGPU_SDMA_IRQ_TRAP0:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
+			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
+			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
+			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
+			break;
+		default:
+			break;
+		}
+		break;
+	case AMDGPU_SDMA_IRQ_TRAP1:
+		switch (state) {
+		case AMDGPU_IRQ_STATE_DISABLE:
+			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
+			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+			break;
+		case AMDGPU_IRQ_STATE_ENABLE:
+			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
+			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
+			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
+				     struct amdgpu_irq_src *source,
+				     struct amdgpu_iv_entry *entry)
+{
+	u8 instance_id, queue_id;
+
+	instance_id = (entry->ring_id & 0x3) >> 0;
+	queue_id = (entry->ring_id & 0xc) >> 2;
+	DRM_DEBUG("IH: SDMA trap\n");
+	switch (instance_id) {
+	case 0:
+		switch (queue_id) {
+		case 0:
+			amdgpu_fence_process(&adev->sdma[0].ring);
+			break;
+		case 1:
+			/* XXX compute */
+			break;
+		case 2:
+			/* XXX compute */
+			break;
+		}
+		break;
+	case 1:
+		switch (queue_id) {
+		case 0:
+			amdgpu_fence_process(&adev->sdma[1].ring);
+			break;
+		case 1:
+			/* XXX compute */
+			break;
+		case 2:
+			/* XXX compute */
+			break;
+		}
+		break;
+	}
+
+	return 0;
+}
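
For reference, the trap handler above recovers which SDMA instance and queue raised the interrupt from the low bits of entry->ring_id: bits [1:0] select the instance and bits [3:2] the queue. A minimal standalone sketch of the same decode (illustrative only, not part of the patch; decode_ring_id is a hypothetical helper):

#include <stdio.h>

/* Illustrative only: mirrors the ring_id decode in cik_sdma_process_trap_irq. */
static void decode_ring_id(unsigned int ring_id)
{
	unsigned int instance_id = ring_id & 0x3;	/* bits [1:0]: SDMA instance */
	unsigned int queue_id = (ring_id & 0xc) >> 2;	/* bits [3:2]: queue on that instance */

	printf("ring_id 0x%x -> SDMA instance %u, queue %u\n",
	       ring_id, instance_id, queue_id);
}

int main(void)
{
	decode_ring_id(0x0);	/* SDMA0, gfx queue */
	decode_ring_id(0x5);	/* SDMA1, first compute queue */
	return 0;
}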
+
+static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
+					     struct amdgpu_irq_src *source,
+					     struct amdgpu_iv_entry *entry)
+{
+	DRM_ERROR("Illegal instruction in SDMA command stream\n");
+	schedule_work(&adev->reset_work);
+	return 0;
+}
+
+static int cik_sdma_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	bool gate = false;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (state == AMD_CG_STATE_GATE)
+		gate = true;
+
+	cik_enable_sdma_mgcg(adev, gate);
+	cik_enable_sdma_mgls(adev, gate);
+
+	return 0;
+}
+
+static int cik_sdma_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs cik_sdma_ip_funcs = {
+	.early_init = cik_sdma_early_init,
+	.late_init = NULL,
+	.sw_init = cik_sdma_sw_init,
+	.sw_fini = cik_sdma_sw_fini,
+	.hw_init = cik_sdma_hw_init,
+	.hw_fini = cik_sdma_hw_fini,
+	.suspend = cik_sdma_suspend,
+	.resume = cik_sdma_resume,
+	.is_idle = cik_sdma_is_idle,
+	.wait_for_idle = cik_sdma_wait_for_idle,
+	.soft_reset = cik_sdma_soft_reset,
+	.print_status = cik_sdma_print_status,
+	.set_clockgating_state = cik_sdma_set_clockgating_state,
+	.set_powergating_state = cik_sdma_set_powergating_state,
+};
+
+/**
+ * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (CIK).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
+{
+
+	if (cik_sdma_is_idle(ring->adev)) {
+		amdgpu_ring_lockup_update(ring);
+		return false;
+	}
+	return amdgpu_ring_test_lockup(ring);
+}
+
+static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
+	.get_rptr = cik_sdma_ring_get_rptr,
+	.get_wptr = cik_sdma_ring_get_wptr,
+	.set_wptr = cik_sdma_ring_set_wptr,
+	.parse_cs = NULL,
+	.emit_ib = cik_sdma_ring_emit_ib,
+	.emit_fence = cik_sdma_ring_emit_fence,
+	.emit_semaphore = cik_sdma_ring_emit_semaphore,
+	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
+	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
+	.test_ring = cik_sdma_ring_test_ring,
+	.test_ib = cik_sdma_ring_test_ib,
+	.is_lockup = cik_sdma_ring_is_lockup,
+};
+
+static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
+{
+	adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
+	adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
+	.set = cik_sdma_set_trap_irq_state,
+	.process = cik_sdma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
+	.process = cik_sdma_process_illegal_inst_irq,
+};
+
+static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+	adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+}
+
+/**
+ * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (CIK).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
+				      uint64_t src_offset,
+				      uint64_t dst_offset,
+				      uint32_t byte_count)
+{
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
+	amdgpu_ring_write(ring, byte_count);
+	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
+	amdgpu_ring_write(ring, lower_32_bits(src_offset));
+	amdgpu_ring_write(ring, upper_32_bits(src_offset));
+	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+}
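
As a cross-check, the copy path above emits exactly the 7 dwords accounted for by copy_num_dw in cik_sdma_buffer_funcs further down, and a single packet can move at most copy_max_bytes (0x1fffff) bytes, so larger transfers are split into several packets. Below is a small standalone sketch (not driver code) that builds the same 7-dword linear-copy packet into a plain array; the SDMA_PACKET macro and opcode values are copied from cikd.h later in this pull, and build_linear_copy is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

/* Copied from cikd.h so the sketch builds on its own. */
#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
					 (((sub_op) & 0xFF) << 8) |	\
					 (((op) & 0xFF) << 0))
#define SDMA_OPCODE_COPY		1
#define SDMA_COPY_SUB_OPCODE_LINEAR	0

/* Build the 7-dword linear copy packet that cik_sdma_emit_copy_buffer writes to the ring. */
static void build_linear_copy(uint32_t *pkt, uint64_t src, uint64_t dst, uint32_t bytes)
{
	pkt[0] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	pkt[1] = bytes;
	pkt[2] = 0;			/* src/dst endian swap */
	pkt[3] = (uint32_t)src;		/* src lo */
	pkt[4] = (uint32_t)(src >> 32);	/* src hi */
	pkt[5] = (uint32_t)dst;		/* dst lo */
	pkt[6] = (uint32_t)(dst >> 32);	/* dst hi */
}

int main(void)
{
	uint32_t pkt[7];

	build_linear_copy(pkt, 0x100000000ULL, 0x200001000ULL, 0x1000);
	for (int i = 0; i < 7; i++)
		printf("dw%d = 0x%08x\n", i, (unsigned int)pkt[i]);
	return 0;	/* dw0 is 0x00000001 for a linear copy */
}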
+
+/**
+ * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (CIK).
+ */
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+				      uint32_t src_data,
+				      uint64_t dst_offset,
+				      uint32_t byte_count)
+{
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
+	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
+	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	amdgpu_ring_write(ring, src_data);
+	amdgpu_ring_write(ring, byte_count);
+}
+
+static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
+	.copy_max_bytes = 0x1fffff,
+	.copy_num_dw = 7,
+	.emit_copy_buffer = cik_sdma_emit_copy_buffer,
+
+	.fill_max_bytes = 0x1fffff,
+	.fill_num_dw = 5,
+	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
+};
+
+static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+	if (adev->mman.buffer_funcs == NULL) {
+		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+	}
+}
+
+static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+	.copy_pte = cik_sdma_vm_copy_pte,
+	.write_pte = cik_sdma_vm_write_pte,
+	.set_pte_pde = cik_sdma_vm_set_pte_pde,
+	.pad_ib = cik_sdma_vm_pad_ib,
+};
+
+static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+	if (adev->vm_manager.vm_pte_funcs == NULL) {
+		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+	}
+}

+ 29 - 0
drivers/gpu/drm/amd/amdgpu/cik_sdma.h

@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CIK_SDMA_H__
+#define __CIK_SDMA_H__
+
+extern const struct amd_ip_funcs cik_sdma_ip_funcs;
+
+#endif

+ 555 - 0
drivers/gpu/drm/amd/amdgpu/cikd.h

@@ -0,0 +1,555 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef CIK_H
+#define CIK_H
+
+#define MC_SEQ_MISC0__MT__MASK	0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
+#define MC_SEQ_MISC0__MT__DDR2   0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
+#define MC_SEQ_MISC0__MT__HBM    0x60000000
+#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
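
The MC_SEQ_MISC0__MT__* values above place the VRAM type in the top nibble of MC_SEQ_MISC0, so a reader of the register masks with MC_SEQ_MISC0__MT__MASK and compares against one of these constants. A standalone sketch of that decode (vram_type is a hypothetical helper; the real driver reads the register with RREG32 and handles every type listed above):

#include <stdint.h>
#include <stdio.h>

/* Values mirror the MC_SEQ_MISC0__MT__* defines above (top nibble = memory type). */
#define MT_MASK   0xf0000000u
#define MT_GDDR5  0x50000000u
#define MT_HBM    0x60000000u
#define MT_DDR3   0xB0000000u

/* Illustrative decode: map the MC_SEQ_MISC0 top nibble to a name. */
static const char *vram_type(uint32_t mc_seq_misc0)
{
	switch (mc_seq_misc0 & MT_MASK) {
	case MT_GDDR5: return "GDDR5";
	case MT_HBM:   return "HBM";
	case MT_DDR3:  return "DDR3";
	default:       return "other/unknown";
	}
}

int main(void)
{
	printf("%s\n", vram_type(0x5abc1234));	/* prints GDDR5 */
	return 0;
}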
+
+#define CP_ME_TABLE_SIZE    96
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define CRTC0_REGISTER_OFFSET                 (0x1b7c - 0x1b7c)
+#define CRTC1_REGISTER_OFFSET                 (0x1e7c - 0x1b7c)
+#define CRTC2_REGISTER_OFFSET                 (0x417c - 0x1b7c)
+#define CRTC3_REGISTER_OFFSET                 (0x447c - 0x1b7c)
+#define CRTC4_REGISTER_OFFSET                 (0x477c - 0x1b7c)
+#define CRTC5_REGISTER_OFFSET                 (0x4a7c - 0x1b7c)
+
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN        0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN         0x12011003
+
+#define CIK_RB_BITMAP_WIDTH_PER_SH     2
+#define HAWAII_RB_BITMAP_WIDTH_PER_SH  4
+
+#define AMDGPU_NUM_OF_VMIDS	8
+
+#define		PIPEID(x)					((x) << 0)
+#define		MEID(x)						((x) << 2)
+#define		VMID(x)						((x) << 4)
+#define		QUEUEID(x)					((x) << 8)
+
+#define mmCC_DRM_ID_STRAPS				0x1559
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK		0xf0000000
+
+#define mmCHUB_CONTROL					0x619
+#define		BYPASS_VM					(1 << 0)
+
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+
+#define mmGRPH_LUT_10BIT_BYPASS_CONTROL			0x1a02
+#define		LUT_10BIT_BYPASS_EN			(1 << 8)
+
+#       define CURSOR_MONO                    0
+#       define CURSOR_24_1                    1
+#       define CURSOR_24_8_PRE_MULT           2
+#       define CURSOR_24_8_UNPRE_MULT         3
+#       define CURSOR_URGENT_ALWAYS           0
+#       define CURSOR_URGENT_1_8              1
+#       define CURSOR_URGENT_1_4              2
+#       define CURSOR_URGENT_3_8              3
+#       define CURSOR_URGENT_1_2              4
+
+#       define GRPH_DEPTH_8BPP                0
+#       define GRPH_DEPTH_16BPP               1
+#       define GRPH_DEPTH_32BPP               2
+/* 8 BPP */
+#       define GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define GRPH_FORMAT_ARGB1555           0
+#       define GRPH_FORMAT_ARGB565            1
+#       define GRPH_FORMAT_ARGB4444           2
+#       define GRPH_FORMAT_AI88               3
+#       define GRPH_FORMAT_MONO16             4
+#       define GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define GRPH_FORMAT_ARGB8888           0
+#       define GRPH_FORMAT_ARGB2101010        1
+#       define GRPH_FORMAT_32BPP_DIG          2
+#       define GRPH_FORMAT_8B_ARGB2101010     3
+#       define GRPH_FORMAT_BGRA1010102        4
+#       define GRPH_FORMAT_8B_BGRA1010102     5
+#       define GRPH_FORMAT_RGB111110          6
+#       define GRPH_FORMAT_BGR101111          7
+#       define ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define GRPH_ARRAY_LINEAR_GENERAL      0
+#       define GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define GRPH_ARRAY_1D_TILED_THIN1      2
+#       define GRPH_ARRAY_2D_TILED_THIN1      4
+#       define DISPLAY_MICRO_TILING          0
+#       define THIN_MICRO_TILING             1
+#       define DEPTH_MICRO_TILING            2
+#       define ROTATED_MICRO_TILING          4
+#       define GRPH_ENDIAN_NONE               0
+#       define GRPH_ENDIAN_8IN16              1
+#       define GRPH_ENDIAN_8IN32              2
+#       define GRPH_ENDIAN_8IN64              3
+#       define GRPH_RED_SEL_R                 0
+#       define GRPH_RED_SEL_G                 1
+#       define GRPH_RED_SEL_B                 2
+#       define GRPH_RED_SEL_A                 3
+#       define GRPH_GREEN_SEL_G               0
+#       define GRPH_GREEN_SEL_B               1
+#       define GRPH_GREEN_SEL_A               2
+#       define GRPH_GREEN_SEL_R               3
+#       define GRPH_BLUE_SEL_B                0
+#       define GRPH_BLUE_SEL_A                1
+#       define GRPH_BLUE_SEL_R                2
+#       define GRPH_BLUE_SEL_G                3
+#       define GRPH_ALPHA_SEL_A               0
+#       define GRPH_ALPHA_SEL_R               1
+#       define GRPH_ALPHA_SEL_G               2
+#       define GRPH_ALPHA_SEL_B               3
+#       define INPUT_GAMMA_USE_LUT                  0
+#       define INPUT_GAMMA_BYPASS                   1
+#       define INPUT_GAMMA_SRGB_24                  2
+#       define INPUT_GAMMA_XVYCC_222                3
+
+#       define INPUT_CSC_BYPASS                     0
+#       define INPUT_CSC_PROG_COEFF                 1
+#       define INPUT_CSC_PROG_SHARED_MATRIXA        2
+
+#       define OUTPUT_CSC_BYPASS                    0
+#       define OUTPUT_CSC_TV_RGB                    1
+#       define OUTPUT_CSC_YCBCR_601                 2
+#       define OUTPUT_CSC_YCBCR_709                 3
+#       define OUTPUT_CSC_PROG_COEFF                4
+#       define OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+
+#       define DEGAMMA_BYPASS                       0
+#       define DEGAMMA_SRGB_24                      1
+#       define DEGAMMA_XVYCC_222                    2
+#       define GAMUT_REMAP_BYPASS                   0
+#       define GAMUT_REMAP_PROG_COEFF               1
+#       define GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+
+#       define REGAMMA_BYPASS                       0
+#       define REGAMMA_SRGB_24                      1
+#       define REGAMMA_XVYCC_222                    2
+#       define REGAMMA_PROG_A                       3
+#       define REGAMMA_PROG_B                       4
+
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
+#       define HDMI_24BIT_DEEP_COLOR         0
+#       define HDMI_30BIT_DEEP_COLOR         1
+#       define HDMI_36BIT_DEEP_COLOR         2
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_X1                   1
+#       define HDMI_ACR_X2                   2
+#       define HDMI_ACR_X4                   4
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+
+#       define ARRAY_MODE(x)					((x) << 2)
+#       define PIPE_CONFIG(x)					((x) << 6)
+#       define TILE_SPLIT(x)					((x) << 11)
+#       define MICRO_TILE_MODE_NEW(x)				((x) << 22)
+#       define SAMPLE_SPLIT(x)					((x) << 25)
+#       define BANK_WIDTH(x)					((x) << 0)
+#       define BANK_HEIGHT(x)					((x) << 2)
+#       define MACRO_TILE_ASPECT(x)				((x) << 4)
+#       define NUM_BANKS(x)					((x) << 6)
+
+#define		MSG_ENTER_RLC_SAFE_MODE      			1
+#define		MSG_EXIT_RLC_SAFE_MODE      			0
+
+/*
+ * PM4
+ */
+#define	PACKET_TYPE0	0
+#define	PACKET_TYPE1	1
+#define	PACKET_TYPE2	2
+#define	PACKET_TYPE3	3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
+			 ((reg) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
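
For orientation, a PM4 type-3 header packs the packet type into bits [31:30], the opcode into bits [15:8] and a count field into bits [29:16], which is exactly what PACKET3() above composes and what the CP_PACKET_GET_* macros take apart. A standalone sketch that round-trips one header (the macros are re-stated locally so the snippet builds on its own, with a 3u constant to keep the shift unsigned; the 0x3FFE count is an arbitrary example value):

#include <stdint.h>
#include <stdio.h>

/* Re-stated from the defines above for a self-contained build. */
#define PACKET_TYPE3		3u
#define PACKET3(op, n)		((PACKET_TYPE3 << 30) | (((op) & 0xFF) << 8) | \
				 ((n) & 0x3FFF) << 16)
#define CP_PACKET_GET_TYPE(h)	(((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)	(((h) >> 16) & 0x3FFF)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET3_NOP		0x10

int main(void)
{
	uint32_t hdr = PACKET3(PACKET3_NOP, 0x3FFE);	/* e.g. a large NOP filler */

	printf("hdr=0x%08x type=%u opcode=0x%x count=0x%x\n",
	       (unsigned int)hdr, (unsigned int)CP_PACKET_GET_TYPE(hdr),
	       (unsigned int)CP_PACKET3_GET_OPCODE(hdr),
	       (unsigned int)CP_PACKET_GET_COUNT(hdr));
	return 0;
}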
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC_MEM				0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x33
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_PREAMBLE				0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+		/* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - gl2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_CACHE_POLICY(x)              ((x) << 25)
+		/* 0 - LRU
+		 * 1 - Stream
+		 */
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+		/* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#              define PACKET3_SEM_USE_MAILBOX       (0x1 << 16)
+#              define PACKET3_SEM_SEL_SIGNAL_TYPE   (0x1 << 20) /* 0 = increment, 1 = write 1 */
+#              define PACKET3_SEM_CLIENT_CODE(x)    ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */
+#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+		/* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+		/* 0 - reg
+		 * 1 - mem
+		 */
+#define		WAIT_REG_MEM_OPERATION(x)               ((x) << 6)
+		/* 0 - wait_reg_mem
+		 * 1 - wr_wait_wr_reg
+		 */
+#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+		/* 0 - me
+		 * 1 - pfp
+		 */
+#define	PACKET3_INDIRECT_BUFFER				0x3F
+#define		INDIRECT_BUFFER_TCL2_VOLATILE           (1 << 22)
+#define		INDIRECT_BUFFER_VALID                   (1 << 23)
+#define		INDIRECT_BUFFER_CACHE_POLICY(x)         ((x) << 28)
+		/* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_TCL1_VOL_ACTION_ENA  (1 << 15)
+#              define PACKET3_TC_VOL_ACTION_ENA    (1 << 16) /* L2 */
+#              define PACKET3_TC_WB_ACTION_ENA     (1 << 18) /* L2 */
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23) /* L2 */
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+		/* 0 - any non-TS event
+		 * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_*
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		EOP_TCL1_VOL_ACTION_EN                  (1 << 12)
+#define		EOP_TC_VOL_ACTION_EN                    (1 << 13) /* L2 */
+#define		EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
+#define		EOP_TCL1_ACTION_EN                      (1 << 16)
+#define		EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
+#define		EOP_TCL2_VOLATILE                       (1 << 24)
+#define		EOP_CACHE_POLICY(x)                     ((x) << 25)
+		/* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#define		DATA_SEL(x)                             ((x) << 29)
+		/* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit GPU counter value
+		 * 4 - send 64bit sys counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+		/* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define		DST_SEL(x)                              ((x) << 16)
+		/* 0 - MC
+		 * 1 - TC/L2
+		 */
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_RELEASE_MEM				0x49
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_DMA_DATA				0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+#              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
+		/* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+		/* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+#              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
+		/* 0 - DST_ADDR using DAS
+		 * 1 - GDS
+		 * 3 - DST_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+		/* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+#              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
+		/* 0 - SRC_ADDR using SAS
+		 * 1 - GDS
+		 * 2 - DATA
+		 * 3 - SRC_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
+/* COMMAND */
+#              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
+#              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+		/* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+		/* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
+		/* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
+		/* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
+#              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
+#              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
+#define	PACKET3_AQUIRE_MEM				0x58
+#define	PACKET3_REWIND					0x59
+#define	PACKET3_LOAD_UCONFIG_REG			0x5E
+#define	PACKET3_LOAD_SH_REG				0x5F
+#define	PACKET3_LOAD_CONFIG_REG				0x60
+#define	PACKET3_LOAD_CONTEXT_REG			0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00002000
+#define		PACKET3_SET_CONFIG_REG_END			0x00002c00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x0000a000
+#define		PACKET3_SET_CONTEXT_REG_END			0x0000a400
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x00002c00
+#define		PACKET3_SET_SH_REG_END				0x00003000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_SET_QUEUE_REG				0x78
+#define	PACKET3_SET_UCONFIG_REG				0x79
+#define		PACKET3_SET_UCONFIG_REG_START			0x0000c000
+#define		PACKET3_SET_UCONFIG_REG_END			0x0000c400
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* SDMA - first instance at 0xd000, second at 0xd800 */
+#define SDMA0_REGISTER_OFFSET                             0x0 /* not a register */
+#define SDMA1_REGISTER_OFFSET                             0x200 /* not a register */
+#define SDMA_MAX_INSTANCE 2
+
+#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
+					 (((sub_op) & 0xFF) << 8) |	\
+					 (((op) & 0xFF) << 0))
+/* sDMA opcodes */
+#define	SDMA_OPCODE_NOP					  0
+#define	SDMA_OPCODE_COPY				  1
+#       define SDMA_COPY_SUB_OPCODE_LINEAR                0
+#       define SDMA_COPY_SUB_OPCODE_TILED                 1
+#       define SDMA_COPY_SUB_OPCODE_SOA                   3
+#       define SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW     4
+#       define SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW      5
+#       define SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW        6
+#define	SDMA_OPCODE_WRITE				  2
+#       define SDMA_WRITE_SUB_OPCODE_LINEAR               0
+#       define SDMA_WRTIE_SUB_OPCODE_TILED                1
+#define	SDMA_OPCODE_INDIRECT_BUFFER			  4
+#define	SDMA_OPCODE_FENCE				  5
+#define	SDMA_OPCODE_TRAP				  6
+#define	SDMA_OPCODE_SEMAPHORE				  7
+#       define SDMA_SEMAPHORE_EXTRA_O                     (1 << 13)
+		/* 0 - increment
+		 * 1 - write 1
+		 */
+#       define SDMA_SEMAPHORE_EXTRA_S                     (1 << 14)
+		/* 0 - wait
+		 * 1 - signal
+		 */
+#       define SDMA_SEMAPHORE_EXTRA_M                     (1 << 15)
+		/* mailbox */
+#define	SDMA_OPCODE_POLL_REG_MEM			  8
+#       define SDMA_POLL_REG_MEM_EXTRA_OP(x)              ((x) << 10)
+		/* 0 - wait_reg_mem
+		 * 1 - wr_wait_wr_reg
+		 */
+#       define SDMA_POLL_REG_MEM_EXTRA_FUNC(x)            ((x) << 12)
+		/* 0 - always
+		 * 1 - <
+		 * 2 - <=
+		 * 3 - ==
+		 * 4 - !=
+		 * 5 - >=
+		 * 6 - >
+		 */
+#       define SDMA_POLL_REG_MEM_EXTRA_M                  (1 << 15)
+		/* 0 = register
+		 * 1 = memory
+		 */
+#define	SDMA_OPCODE_COND_EXEC				  9
+#define	SDMA_OPCODE_CONSTANT_FILL			  11
+#       define SDMA_CONSTANT_FILL_EXTRA_SIZE(x)           ((x) << 14)
+		/* 0 = byte fill
+		 * 2 = DW fill
+		 */
+#define	SDMA_OPCODE_GENERATE_PTE_PDE			  12
+#define	SDMA_OPCODE_TIMESTAMP				  13
+#       define SDMA_TIMESTAMP_SUB_OPCODE_SET_LOCAL        0
+#       define SDMA_TIMESTAMP_SUB_OPCODE_GET_LOCAL        1
+#       define SDMA_TIMESTAMP_SUB_OPCODE_GET_GLOBAL       2
+#define	SDMA_OPCODE_SRBM_WRITE				  14
+#       define SDMA_SRBM_WRITE_EXTRA_BYTE_ENABLE(x)       ((x) << 12)
+		/* byte mask */
+
+#define VCE_CMD_NO_OP		0x00000000
+#define VCE_CMD_END		0x00000001
+#define VCE_CMD_IB		0x00000002
+#define VCE_CMD_FENCE		0x00000003
+#define VCE_CMD_TRAP		0x00000004
+#define VCE_CMD_IB_AUTO		0x00000005
+#define VCE_CMD_SEMAPHORE	0x00000006
+
+#endif

+ 944 - 0
drivers/gpu/drm/amd/amdgpu/clearstate_ci.h

@@ -0,0 +1,944 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int ci_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0x00000000, // TA_BC_BASE_ADDR_HI
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_HI_0
+    0x00000000, // COHER_DEST_BASE_HI_1
+    0x00000000, // COHER_DEST_BASE_HI_2
+    0x00000000, // COHER_DEST_BASE_HI_3
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int ci_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+    0, // HOLE
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int ci_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int ci_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0x00000000, // VGT_GS_ONCHIP_CNTL
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const unsigned int ci_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // WD_ENHANCE
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int ci_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int ci_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0, // HOLE
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000010, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0, // HOLE
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
+{
+    {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+    {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+    {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { 0, 0, 0 }
+};
+static const struct cs_section_def ci_cs_data[] = {
+    { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { 0, SECT_NONE }
+};

+ 44 - 0
drivers/gpu/drm/amd/amdgpu/clearstate_defs.h

@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef CLEARSTATE_DEFS_H
+#define CLEARSTATE_DEFS_H
+
+enum section_id {
+    SECT_NONE,
+    SECT_CONTEXT,
+    SECT_CLEAR,
+    SECT_CTRLCONST
+};
+
+struct cs_extent_def {
+    const unsigned int *extent;
+    const unsigned int reg_index;
+    const unsigned int reg_count;
+};
+
+struct cs_section_def {
+    const struct cs_extent_def *section;
+    const enum section_id id;
+};
+
+#endif
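The two structures above tie the clear-state tables together: each cs_extent_def points at one of the register-default arrays and records the context-register index it starts at plus how many consecutive registers that array covers, while a cs_section_def groups the extents belonging to one section. A minimal sketch of how such a table can be walked, assuming only the definitions in this header (the helper name is hypothetical, not part of the patch):

static unsigned int cs_data_reg_count(const struct cs_section_def *cs_data)
{
	const struct cs_section_def *sect;
	const struct cs_extent_def *ext;
	unsigned int count = 0;

	/* both tables end with an all-zero sentinel entry */
	for (sect = cs_data; sect->section != NULL; sect++)
		for (ext = sect->section; ext->extent != NULL; ext++)
			count += ext->reg_count;

	return count;
}

For the ci_SECT_CONTEXT_defs above (and the vi table that follows) this sums 212 + 274 + 6 + 157 + 2 + 1 + 233 = 885 context registers.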

+ 944 - 0
drivers/gpu/drm/amd/amdgpu/clearstate_vi.h

@@ -0,0 +1,944 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int vi_SECT_CONTEXT_def_1[] =
+{
+    0x00000000, // DB_RENDER_CONTROL
+    0x00000000, // DB_COUNT_CONTROL
+    0x00000000, // DB_DEPTH_VIEW
+    0x00000000, // DB_RENDER_OVERRIDE
+    0x00000000, // DB_RENDER_OVERRIDE2
+    0x00000000, // DB_HTILE_DATA_BASE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // DB_DEPTH_BOUNDS_MIN
+    0x00000000, // DB_DEPTH_BOUNDS_MAX
+    0x00000000, // DB_STENCIL_CLEAR
+    0x00000000, // DB_DEPTH_CLEAR
+    0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+    0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+    0, // HOLE
+    0x00000000, // DB_DEPTH_INFO
+    0x00000000, // DB_Z_INFO
+    0x00000000, // DB_STENCIL_INFO
+    0x00000000, // DB_Z_READ_BASE
+    0x00000000, // DB_STENCIL_READ_BASE
+    0x00000000, // DB_Z_WRITE_BASE
+    0x00000000, // DB_STENCIL_WRITE_BASE
+    0x00000000, // DB_DEPTH_SIZE
+    0x00000000, // DB_DEPTH_SLICE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // TA_BC_BASE_ADDR
+    0x00000000, // TA_BC_BASE_ADDR_HI
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // COHER_DEST_BASE_HI_0
+    0x00000000, // COHER_DEST_BASE_HI_1
+    0x00000000, // COHER_DEST_BASE_HI_2
+    0x00000000, // COHER_DEST_BASE_HI_3
+    0x00000000, // COHER_DEST_BASE_2
+    0x00000000, // COHER_DEST_BASE_3
+    0x00000000, // PA_SC_WINDOW_OFFSET
+    0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+    0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+    0x0000ffff, // PA_SC_CLIPRECT_RULE
+    0x00000000, // PA_SC_CLIPRECT_0_TL
+    0x40004000, // PA_SC_CLIPRECT_0_BR
+    0x00000000, // PA_SC_CLIPRECT_1_TL
+    0x40004000, // PA_SC_CLIPRECT_1_BR
+    0x00000000, // PA_SC_CLIPRECT_2_TL
+    0x40004000, // PA_SC_CLIPRECT_2_BR
+    0x00000000, // PA_SC_CLIPRECT_3_TL
+    0x40004000, // PA_SC_CLIPRECT_3_BR
+    0xaa99aaaa, // PA_SC_EDGERULE
+    0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+    0xffffffff, // CB_TARGET_MASK
+    0xffffffff, // CB_SHADER_MASK
+    0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+    0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+    0x00000000, // COHER_DEST_BASE_0
+    0x00000000, // COHER_DEST_BASE_1
+    0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+    0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+    0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+    0x00000000, // PA_SC_VPORT_ZMIN_0
+    0x3f800000, // PA_SC_VPORT_ZMAX_0
+    0x00000000, // PA_SC_VPORT_ZMIN_1
+    0x3f800000, // PA_SC_VPORT_ZMAX_1
+    0x00000000, // PA_SC_VPORT_ZMIN_2
+    0x3f800000, // PA_SC_VPORT_ZMAX_2
+    0x00000000, // PA_SC_VPORT_ZMIN_3
+    0x3f800000, // PA_SC_VPORT_ZMAX_3
+    0x00000000, // PA_SC_VPORT_ZMIN_4
+    0x3f800000, // PA_SC_VPORT_ZMAX_4
+    0x00000000, // PA_SC_VPORT_ZMIN_5
+    0x3f800000, // PA_SC_VPORT_ZMAX_5
+    0x00000000, // PA_SC_VPORT_ZMIN_6
+    0x3f800000, // PA_SC_VPORT_ZMAX_6
+    0x00000000, // PA_SC_VPORT_ZMIN_7
+    0x3f800000, // PA_SC_VPORT_ZMAX_7
+    0x00000000, // PA_SC_VPORT_ZMIN_8
+    0x3f800000, // PA_SC_VPORT_ZMAX_8
+    0x00000000, // PA_SC_VPORT_ZMIN_9
+    0x3f800000, // PA_SC_VPORT_ZMAX_9
+    0x00000000, // PA_SC_VPORT_ZMIN_10
+    0x3f800000, // PA_SC_VPORT_ZMAX_10
+    0x00000000, // PA_SC_VPORT_ZMIN_11
+    0x3f800000, // PA_SC_VPORT_ZMAX_11
+    0x00000000, // PA_SC_VPORT_ZMIN_12
+    0x3f800000, // PA_SC_VPORT_ZMAX_12
+    0x00000000, // PA_SC_VPORT_ZMIN_13
+    0x3f800000, // PA_SC_VPORT_ZMAX_13
+    0x00000000, // PA_SC_VPORT_ZMIN_14
+    0x3f800000, // PA_SC_VPORT_ZMAX_14
+    0x00000000, // PA_SC_VPORT_ZMIN_15
+    0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int vi_SECT_CONTEXT_def_2[] =
+{
+    0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+    0, // HOLE
+    0x00000000, // CP_PERFMON_CNTX_CNTL
+    0x00000000, // CP_RINGID
+    0x00000000, // CP_VMID
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0xffffffff, // VGT_MAX_VTX_INDX
+    0x00000000, // VGT_MIN_VTX_INDX
+    0x00000000, // VGT_INDX_OFFSET
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+    0, // HOLE
+    0x00000000, // CB_BLEND_RED
+    0x00000000, // CB_BLEND_GREEN
+    0x00000000, // CB_BLEND_BLUE
+    0x00000000, // CB_BLEND_ALPHA
+    0x00000000, // CB_DCC_CONTROL
+    0, // HOLE
+    0x00000000, // DB_STENCIL_CONTROL
+    0x00000000, // DB_STENCILREFMASK
+    0x00000000, // DB_STENCILREFMASK_BF
+    0, // HOLE
+    0x00000000, // PA_CL_VPORT_XSCALE
+    0x00000000, // PA_CL_VPORT_XOFFSET
+    0x00000000, // PA_CL_VPORT_YSCALE
+    0x00000000, // PA_CL_VPORT_YOFFSET
+    0x00000000, // PA_CL_VPORT_ZSCALE
+    0x00000000, // PA_CL_VPORT_ZOFFSET
+    0x00000000, // PA_CL_VPORT_XSCALE_1
+    0x00000000, // PA_CL_VPORT_XOFFSET_1
+    0x00000000, // PA_CL_VPORT_YSCALE_1
+    0x00000000, // PA_CL_VPORT_YOFFSET_1
+    0x00000000, // PA_CL_VPORT_ZSCALE_1
+    0x00000000, // PA_CL_VPORT_ZOFFSET_1
+    0x00000000, // PA_CL_VPORT_XSCALE_2
+    0x00000000, // PA_CL_VPORT_XOFFSET_2
+    0x00000000, // PA_CL_VPORT_YSCALE_2
+    0x00000000, // PA_CL_VPORT_YOFFSET_2
+    0x00000000, // PA_CL_VPORT_ZSCALE_2
+    0x00000000, // PA_CL_VPORT_ZOFFSET_2
+    0x00000000, // PA_CL_VPORT_XSCALE_3
+    0x00000000, // PA_CL_VPORT_XOFFSET_3
+    0x00000000, // PA_CL_VPORT_YSCALE_3
+    0x00000000, // PA_CL_VPORT_YOFFSET_3
+    0x00000000, // PA_CL_VPORT_ZSCALE_3
+    0x00000000, // PA_CL_VPORT_ZOFFSET_3
+    0x00000000, // PA_CL_VPORT_XSCALE_4
+    0x00000000, // PA_CL_VPORT_XOFFSET_4
+    0x00000000, // PA_CL_VPORT_YSCALE_4
+    0x00000000, // PA_CL_VPORT_YOFFSET_4
+    0x00000000, // PA_CL_VPORT_ZSCALE_4
+    0x00000000, // PA_CL_VPORT_ZOFFSET_4
+    0x00000000, // PA_CL_VPORT_XSCALE_5
+    0x00000000, // PA_CL_VPORT_XOFFSET_5
+    0x00000000, // PA_CL_VPORT_YSCALE_5
+    0x00000000, // PA_CL_VPORT_YOFFSET_5
+    0x00000000, // PA_CL_VPORT_ZSCALE_5
+    0x00000000, // PA_CL_VPORT_ZOFFSET_5
+    0x00000000, // PA_CL_VPORT_XSCALE_6
+    0x00000000, // PA_CL_VPORT_XOFFSET_6
+    0x00000000, // PA_CL_VPORT_YSCALE_6
+    0x00000000, // PA_CL_VPORT_YOFFSET_6
+    0x00000000, // PA_CL_VPORT_ZSCALE_6
+    0x00000000, // PA_CL_VPORT_ZOFFSET_6
+    0x00000000, // PA_CL_VPORT_XSCALE_7
+    0x00000000, // PA_CL_VPORT_XOFFSET_7
+    0x00000000, // PA_CL_VPORT_YSCALE_7
+    0x00000000, // PA_CL_VPORT_YOFFSET_7
+    0x00000000, // PA_CL_VPORT_ZSCALE_7
+    0x00000000, // PA_CL_VPORT_ZOFFSET_7
+    0x00000000, // PA_CL_VPORT_XSCALE_8
+    0x00000000, // PA_CL_VPORT_XOFFSET_8
+    0x00000000, // PA_CL_VPORT_YSCALE_8
+    0x00000000, // PA_CL_VPORT_YOFFSET_8
+    0x00000000, // PA_CL_VPORT_ZSCALE_8
+    0x00000000, // PA_CL_VPORT_ZOFFSET_8
+    0x00000000, // PA_CL_VPORT_XSCALE_9
+    0x00000000, // PA_CL_VPORT_XOFFSET_9
+    0x00000000, // PA_CL_VPORT_YSCALE_9
+    0x00000000, // PA_CL_VPORT_YOFFSET_9
+    0x00000000, // PA_CL_VPORT_ZSCALE_9
+    0x00000000, // PA_CL_VPORT_ZOFFSET_9
+    0x00000000, // PA_CL_VPORT_XSCALE_10
+    0x00000000, // PA_CL_VPORT_XOFFSET_10
+    0x00000000, // PA_CL_VPORT_YSCALE_10
+    0x00000000, // PA_CL_VPORT_YOFFSET_10
+    0x00000000, // PA_CL_VPORT_ZSCALE_10
+    0x00000000, // PA_CL_VPORT_ZOFFSET_10
+    0x00000000, // PA_CL_VPORT_XSCALE_11
+    0x00000000, // PA_CL_VPORT_XOFFSET_11
+    0x00000000, // PA_CL_VPORT_YSCALE_11
+    0x00000000, // PA_CL_VPORT_YOFFSET_11
+    0x00000000, // PA_CL_VPORT_ZSCALE_11
+    0x00000000, // PA_CL_VPORT_ZOFFSET_11
+    0x00000000, // PA_CL_VPORT_XSCALE_12
+    0x00000000, // PA_CL_VPORT_XOFFSET_12
+    0x00000000, // PA_CL_VPORT_YSCALE_12
+    0x00000000, // PA_CL_VPORT_YOFFSET_12
+    0x00000000, // PA_CL_VPORT_ZSCALE_12
+    0x00000000, // PA_CL_VPORT_ZOFFSET_12
+    0x00000000, // PA_CL_VPORT_XSCALE_13
+    0x00000000, // PA_CL_VPORT_XOFFSET_13
+    0x00000000, // PA_CL_VPORT_YSCALE_13
+    0x00000000, // PA_CL_VPORT_YOFFSET_13
+    0x00000000, // PA_CL_VPORT_ZSCALE_13
+    0x00000000, // PA_CL_VPORT_ZOFFSET_13
+    0x00000000, // PA_CL_VPORT_XSCALE_14
+    0x00000000, // PA_CL_VPORT_XOFFSET_14
+    0x00000000, // PA_CL_VPORT_YSCALE_14
+    0x00000000, // PA_CL_VPORT_YOFFSET_14
+    0x00000000, // PA_CL_VPORT_ZSCALE_14
+    0x00000000, // PA_CL_VPORT_ZOFFSET_14
+    0x00000000, // PA_CL_VPORT_XSCALE_15
+    0x00000000, // PA_CL_VPORT_XOFFSET_15
+    0x00000000, // PA_CL_VPORT_YSCALE_15
+    0x00000000, // PA_CL_VPORT_YOFFSET_15
+    0x00000000, // PA_CL_VPORT_ZSCALE_15
+    0x00000000, // PA_CL_VPORT_ZOFFSET_15
+    0x00000000, // PA_CL_UCP_0_X
+    0x00000000, // PA_CL_UCP_0_Y
+    0x00000000, // PA_CL_UCP_0_Z
+    0x00000000, // PA_CL_UCP_0_W
+    0x00000000, // PA_CL_UCP_1_X
+    0x00000000, // PA_CL_UCP_1_Y
+    0x00000000, // PA_CL_UCP_1_Z
+    0x00000000, // PA_CL_UCP_1_W
+    0x00000000, // PA_CL_UCP_2_X
+    0x00000000, // PA_CL_UCP_2_Y
+    0x00000000, // PA_CL_UCP_2_Z
+    0x00000000, // PA_CL_UCP_2_W
+    0x00000000, // PA_CL_UCP_3_X
+    0x00000000, // PA_CL_UCP_3_Y
+    0x00000000, // PA_CL_UCP_3_Z
+    0x00000000, // PA_CL_UCP_3_W
+    0x00000000, // PA_CL_UCP_4_X
+    0x00000000, // PA_CL_UCP_4_Y
+    0x00000000, // PA_CL_UCP_4_Z
+    0x00000000, // PA_CL_UCP_4_W
+    0x00000000, // PA_CL_UCP_5_X
+    0x00000000, // PA_CL_UCP_5_Y
+    0x00000000, // PA_CL_UCP_5_Z
+    0x00000000, // PA_CL_UCP_5_W
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_CNTL_0
+    0x00000000, // SPI_PS_INPUT_CNTL_1
+    0x00000000, // SPI_PS_INPUT_CNTL_2
+    0x00000000, // SPI_PS_INPUT_CNTL_3
+    0x00000000, // SPI_PS_INPUT_CNTL_4
+    0x00000000, // SPI_PS_INPUT_CNTL_5
+    0x00000000, // SPI_PS_INPUT_CNTL_6
+    0x00000000, // SPI_PS_INPUT_CNTL_7
+    0x00000000, // SPI_PS_INPUT_CNTL_8
+    0x00000000, // SPI_PS_INPUT_CNTL_9
+    0x00000000, // SPI_PS_INPUT_CNTL_10
+    0x00000000, // SPI_PS_INPUT_CNTL_11
+    0x00000000, // SPI_PS_INPUT_CNTL_12
+    0x00000000, // SPI_PS_INPUT_CNTL_13
+    0x00000000, // SPI_PS_INPUT_CNTL_14
+    0x00000000, // SPI_PS_INPUT_CNTL_15
+    0x00000000, // SPI_PS_INPUT_CNTL_16
+    0x00000000, // SPI_PS_INPUT_CNTL_17
+    0x00000000, // SPI_PS_INPUT_CNTL_18
+    0x00000000, // SPI_PS_INPUT_CNTL_19
+    0x00000000, // SPI_PS_INPUT_CNTL_20
+    0x00000000, // SPI_PS_INPUT_CNTL_21
+    0x00000000, // SPI_PS_INPUT_CNTL_22
+    0x00000000, // SPI_PS_INPUT_CNTL_23
+    0x00000000, // SPI_PS_INPUT_CNTL_24
+    0x00000000, // SPI_PS_INPUT_CNTL_25
+    0x00000000, // SPI_PS_INPUT_CNTL_26
+    0x00000000, // SPI_PS_INPUT_CNTL_27
+    0x00000000, // SPI_PS_INPUT_CNTL_28
+    0x00000000, // SPI_PS_INPUT_CNTL_29
+    0x00000000, // SPI_PS_INPUT_CNTL_30
+    0x00000000, // SPI_PS_INPUT_CNTL_31
+    0x00000000, // SPI_VS_OUT_CONFIG
+    0, // HOLE
+    0x00000000, // SPI_PS_INPUT_ENA
+    0x00000000, // SPI_PS_INPUT_ADDR
+    0x00000000, // SPI_INTERP_CONTROL_0
+    0x00000002, // SPI_PS_IN_CONTROL
+    0, // HOLE
+    0x00000000, // SPI_BARYC_CNTL
+    0, // HOLE
+    0x00000000, // SPI_TMPRING_SIZE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // SPI_SHADER_POS_FORMAT
+    0x00000000, // SPI_SHADER_Z_FORMAT
+    0x00000000, // SPI_SHADER_COL_FORMAT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // CB_BLEND0_CONTROL
+    0x00000000, // CB_BLEND1_CONTROL
+    0x00000000, // CB_BLEND2_CONTROL
+    0x00000000, // CB_BLEND3_CONTROL
+    0x00000000, // CB_BLEND4_CONTROL
+    0x00000000, // CB_BLEND5_CONTROL
+    0x00000000, // CB_BLEND6_CONTROL
+    0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int vi_SECT_CONTEXT_def_3[] =
+{
+    0x00000000, // PA_CL_POINT_X_RAD
+    0x00000000, // PA_CL_POINT_Y_RAD
+    0x00000000, // PA_CL_POINT_SIZE
+    0x00000000, // PA_CL_POINT_CULL_RAD
+    0x00000000, // VGT_DMA_BASE_HI
+    0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int vi_SECT_CONTEXT_def_4[] =
+{
+    0x00000000, // DB_DEPTH_CONTROL
+    0x00000000, // DB_EQAA
+    0x00000000, // CB_COLOR_CONTROL
+    0x00000000, // DB_SHADER_CONTROL
+    0x00090000, // PA_CL_CLIP_CNTL
+    0x00000004, // PA_SU_SC_MODE_CNTL
+    0x00000000, // PA_CL_VTE_CNTL
+    0x00000000, // PA_CL_VS_OUT_CNTL
+    0x00000000, // PA_CL_NANINF_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+    0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+    0x00000000, // PA_SU_PRIM_FILTER_CNTL
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SU_POINT_SIZE
+    0x00000000, // PA_SU_POINT_MINMAX
+    0x00000000, // PA_SU_LINE_CNTL
+    0x00000000, // PA_SC_LINE_STIPPLE
+    0x00000000, // VGT_OUTPUT_PATH_CNTL
+    0x00000000, // VGT_HOS_CNTL
+    0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+    0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+    0x00000000, // VGT_HOS_REUSE_DEPTH
+    0x00000000, // VGT_GROUP_PRIM_TYPE
+    0x00000000, // VGT_GROUP_FIRST_DECR
+    0x00000000, // VGT_GROUP_DECR
+    0x00000000, // VGT_GROUP_VECT_0_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_CNTL
+    0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+    0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+    0x00000000, // VGT_GS_MODE
+    0x00000000, // VGT_GS_ONCHIP_CNTL
+    0x00000000, // PA_SC_MODE_CNTL_0
+    0x00000000, // PA_SC_MODE_CNTL_1
+    0x00000000, // VGT_ENHANCE
+    0x00000100, // VGT_GS_PER_ES
+    0x00000080, // VGT_ES_PER_GS
+    0x00000002, // VGT_GS_PER_VS
+    0x00000000, // VGT_GSVS_RING_OFFSET_1
+    0x00000000, // VGT_GSVS_RING_OFFSET_2
+    0x00000000, // VGT_GSVS_RING_OFFSET_3
+    0x00000000, // VGT_GS_OUT_PRIM_TYPE
+    0x00000000, // IA_ENHANCE
+};
+static const unsigned int vi_SECT_CONTEXT_def_5[] =
+{
+    0x00000000, // WD_ENHANCE
+    0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int vi_SECT_CONTEXT_def_6[] =
+{
+    0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int vi_SECT_CONTEXT_def_7[] =
+{
+    0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_INSTANCE_STEP_RATE_0
+    0x00000000, // VGT_INSTANCE_STEP_RATE_1
+    0x000000ff, // IA_MULTI_VGT_PARAM
+    0x00000000, // VGT_ESGS_RING_ITEMSIZE
+    0x00000000, // VGT_GSVS_RING_ITEMSIZE
+    0x00000000, // VGT_REUSE_OFF
+    0x00000000, // VGT_VTX_CNT_EN
+    0x00000000, // DB_HTILE_SURFACE
+    0x00000000, // DB_SRESULTS_COMPARE_STATE0
+    0x00000000, // DB_SRESULTS_COMPARE_STATE1
+    0x00000000, // DB_PRELOAD_CONTROL
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+    0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+    0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+    0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+    0, // HOLE
+    0x00000000, // VGT_GS_MAX_VERT_OUT
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // VGT_TESS_DISTRIBUTION
+    0x00000000, // VGT_SHADER_STAGES_EN
+    0x00000000, // VGT_LS_HS_CONFIG
+    0x00000000, // VGT_GS_VERT_ITEMSIZE
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+    0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+    0x00000000, // VGT_TF_PARAM
+    0x00000000, // DB_ALPHA_TO_MASK
+    0x00000000, // VGT_DISPATCH_DRAW_INDEX
+    0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+    0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+    0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+    0x00000000, // VGT_GS_INSTANCE_CNT
+    0x00000000, // VGT_STRMOUT_CONFIG
+    0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x00000000, // PA_SC_CENTROID_PRIORITY_0
+    0x00000000, // PA_SC_CENTROID_PRIORITY_1
+    0x00001000, // PA_SC_LINE_CNTL
+    0x00000000, // PA_SC_AA_CONFIG
+    0x00000005, // PA_SU_VTX_CNTL
+    0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+    0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+    0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+    0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+    0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0, // HOLE
+    0x0000001e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+    0x00000020, // VGT_OUT_DEALLOC_CNTL
+    0x00000000, // CB_COLOR0_BASE
+    0x00000000, // CB_COLOR0_PITCH
+    0x00000000, // CB_COLOR0_SLICE
+    0x00000000, // CB_COLOR0_VIEW
+    0x00000000, // CB_COLOR0_INFO
+    0x00000000, // CB_COLOR0_ATTRIB
+    0x00000000, // CB_COLOR0_DCC_CONTROL
+    0x00000000, // CB_COLOR0_CMASK
+    0x00000000, // CB_COLOR0_CMASK_SLICE
+    0x00000000, // CB_COLOR0_FMASK
+    0x00000000, // CB_COLOR0_FMASK_SLICE
+    0x00000000, // CB_COLOR0_CLEAR_WORD0
+    0x00000000, // CB_COLOR0_CLEAR_WORD1
+    0x00000000, // CB_COLOR0_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR1_BASE
+    0x00000000, // CB_COLOR1_PITCH
+    0x00000000, // CB_COLOR1_SLICE
+    0x00000000, // CB_COLOR1_VIEW
+    0x00000000, // CB_COLOR1_INFO
+    0x00000000, // CB_COLOR1_ATTRIB
+    0x00000000, // CB_COLOR1_DCC_CONTROL
+    0x00000000, // CB_COLOR1_CMASK
+    0x00000000, // CB_COLOR1_CMASK_SLICE
+    0x00000000, // CB_COLOR1_FMASK
+    0x00000000, // CB_COLOR1_FMASK_SLICE
+    0x00000000, // CB_COLOR1_CLEAR_WORD0
+    0x00000000, // CB_COLOR1_CLEAR_WORD1
+    0x00000000, // CB_COLOR1_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR2_BASE
+    0x00000000, // CB_COLOR2_PITCH
+    0x00000000, // CB_COLOR2_SLICE
+    0x00000000, // CB_COLOR2_VIEW
+    0x00000000, // CB_COLOR2_INFO
+    0x00000000, // CB_COLOR2_ATTRIB
+    0x00000000, // CB_COLOR2_DCC_CONTROL
+    0x00000000, // CB_COLOR2_CMASK
+    0x00000000, // CB_COLOR2_CMASK_SLICE
+    0x00000000, // CB_COLOR2_FMASK
+    0x00000000, // CB_COLOR2_FMASK_SLICE
+    0x00000000, // CB_COLOR2_CLEAR_WORD0
+    0x00000000, // CB_COLOR2_CLEAR_WORD1
+    0x00000000, // CB_COLOR2_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR3_BASE
+    0x00000000, // CB_COLOR3_PITCH
+    0x00000000, // CB_COLOR3_SLICE
+    0x00000000, // CB_COLOR3_VIEW
+    0x00000000, // CB_COLOR3_INFO
+    0x00000000, // CB_COLOR3_ATTRIB
+    0x00000000, // CB_COLOR3_DCC_CONTROL
+    0x00000000, // CB_COLOR3_CMASK
+    0x00000000, // CB_COLOR3_CMASK_SLICE
+    0x00000000, // CB_COLOR3_FMASK
+    0x00000000, // CB_COLOR3_FMASK_SLICE
+    0x00000000, // CB_COLOR3_CLEAR_WORD0
+    0x00000000, // CB_COLOR3_CLEAR_WORD1
+    0x00000000, // CB_COLOR3_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR4_BASE
+    0x00000000, // CB_COLOR4_PITCH
+    0x00000000, // CB_COLOR4_SLICE
+    0x00000000, // CB_COLOR4_VIEW
+    0x00000000, // CB_COLOR4_INFO
+    0x00000000, // CB_COLOR4_ATTRIB
+    0x00000000, // CB_COLOR4_DCC_CONTROL
+    0x00000000, // CB_COLOR4_CMASK
+    0x00000000, // CB_COLOR4_CMASK_SLICE
+    0x00000000, // CB_COLOR4_FMASK
+    0x00000000, // CB_COLOR4_FMASK_SLICE
+    0x00000000, // CB_COLOR4_CLEAR_WORD0
+    0x00000000, // CB_COLOR4_CLEAR_WORD1
+    0x00000000, // CB_COLOR4_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR5_BASE
+    0x00000000, // CB_COLOR5_PITCH
+    0x00000000, // CB_COLOR5_SLICE
+    0x00000000, // CB_COLOR5_VIEW
+    0x00000000, // CB_COLOR5_INFO
+    0x00000000, // CB_COLOR5_ATTRIB
+    0x00000000, // CB_COLOR5_DCC_CONTROL
+    0x00000000, // CB_COLOR5_CMASK
+    0x00000000, // CB_COLOR5_CMASK_SLICE
+    0x00000000, // CB_COLOR5_FMASK
+    0x00000000, // CB_COLOR5_FMASK_SLICE
+    0x00000000, // CB_COLOR5_CLEAR_WORD0
+    0x00000000, // CB_COLOR5_CLEAR_WORD1
+    0x00000000, // CB_COLOR5_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR6_BASE
+    0x00000000, // CB_COLOR6_PITCH
+    0x00000000, // CB_COLOR6_SLICE
+    0x00000000, // CB_COLOR6_VIEW
+    0x00000000, // CB_COLOR6_INFO
+    0x00000000, // CB_COLOR6_ATTRIB
+    0x00000000, // CB_COLOR6_DCC_CONTROL
+    0x00000000, // CB_COLOR6_CMASK
+    0x00000000, // CB_COLOR6_CMASK_SLICE
+    0x00000000, // CB_COLOR6_FMASK
+    0x00000000, // CB_COLOR6_FMASK_SLICE
+    0x00000000, // CB_COLOR6_CLEAR_WORD0
+    0x00000000, // CB_COLOR6_CLEAR_WORD1
+    0x00000000, // CB_COLOR6_DCC_BASE
+    0, // HOLE
+    0x00000000, // CB_COLOR7_BASE
+    0x00000000, // CB_COLOR7_PITCH
+    0x00000000, // CB_COLOR7_SLICE
+    0x00000000, // CB_COLOR7_VIEW
+    0x00000000, // CB_COLOR7_INFO
+    0x00000000, // CB_COLOR7_ATTRIB
+    0x00000000, // CB_COLOR7_DCC_CONTROL
+    0x00000000, // CB_COLOR7_CMASK
+    0x00000000, // CB_COLOR7_CMASK_SLICE
+    0x00000000, // CB_COLOR7_FMASK
+    0x00000000, // CB_COLOR7_FMASK_SLICE
+    0x00000000, // CB_COLOR7_CLEAR_WORD0
+    0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def vi_SECT_CONTEXT_defs[] =
+{
+    {vi_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+    {vi_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+    {vi_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+    {vi_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+    {vi_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+    {vi_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+    {vi_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+    { 0, 0, 0 }
+};
+static const struct cs_section_def vi_cs_data[] = {
+    { vi_SECT_CONTEXT_defs, SECT_CONTEXT },
+    { 0, SECT_NONE }
+};

+ 1814 - 0
drivers/gpu/drm/amd/amdgpu/cz_dpm.c

@@ -0,0 +1,1814 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/seq_file.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
+#include "vid.h"
+#include "vi_dpm.h"
+#include "amdgpu_dpm.h"
+#include "cz_dpm.h"
+#include "cz_ppsmc.h"
+#include "atom.h"
+
+#include "smu/smu_8_0_d.h"
+#include "smu/smu_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "bif/bif_5_1_d.h"
+#include "gfx_v8_0.h"
+
+static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+
+static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
+{
+	struct cz_ps *ps = rps->ps_priv;
+
+	return ps;
+}
+
+static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = adev->pm.dpm.priv;
+
+	return pi;
+}
+
+static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
+							uint16_t voltage)
+{
+	uint16_t tmp = 6200 - voltage * 25;
+
+	return tmp;
+}
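A quick sanity check on the conversion above: each index step lowers the result by 25, starting from 6200 at index 0, so an index of 8 yields 6200 - 8 * 25 = 6000. The unit is presumably millivolts, but that is an assumption; the function only returns the raw value the dependency tables expect.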
+
+static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
+				struct amdgpu_clock_and_voltage_limits *table)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_clock_voltage_dependency_table *dep_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	if (dep_table->count > 0) {
+		table->sclk = dep_table->entries[dep_table->count - 1].clk;
+		table->vddc = cz_convert_8bit_index_to_voltage(adev,
+				dep_table->entries[dep_table->count - 1].v);
+	}
+
+	table->mclk = pi->sys_info.nbp_memory_clock[0];
+
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
+};
+
+static int cz_parse_sys_info_table(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+	int i = 0;
+
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+					      data_offset);
+
+		if (crev != 9) {
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			return -EINVAL;
+		}
+		pi->sys_info.bootup_sclk =
+			le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
+		pi->sys_info.bootup_uma_clk =
+			le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
+		pi->sys_info.dentist_vco_freq =
+			le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
+		pi->sys_info.bootup_nb_voltage_index =
+			le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);
+
+		if (igp_info->info_9.ucHtcTmpLmt == 0)
+			pi->sys_info.htc_tmp_lmt = 203;
+		else
+			pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;
+
+		if (igp_info->info_9.ucHtcHystLmt == 0)
+			pi->sys_info.htc_hyst_lmt = 5;
+		else
+			pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;
+
+		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+			return -EINVAL;
+		}
+
+		if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
+				pi->enable_nb_ps_policy)
+			pi->sys_info.nb_dpm_enable = true;
+		else
+			pi->sys_info.nb_dpm_enable = false;
+
+		for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
+			if (i < CZ_NUM_NBPMEMORY_CLOCK)
+				pi->sys_info.nbp_memory_clock[i] =
+				le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
+			pi->sys_info.nbp_n_clock[i] =
+			le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
+		}
+
+		for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
+			pi->sys_info.display_clock[i] =
+			le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
+
+		for (i = 0; i < CZ_NUM_NBPSTATES; i++)
+			pi->sys_info.nbp_voltage_index[i] =
+				le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);
+
+		if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
+			SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+			pi->caps_enable_dfs_bypass = true;
+
+		pi->sys_info.uma_channel_number =
+			igp_info->info_9.ucUMAChannelNumber;
+
+		cz_construct_max_power_limits_table(adev,
+			&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+	}
+
+	return 0;
+}
+
+static void cz_patch_voltage_values(struct amdgpu_device *adev)
+{
+	int i;
+	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table *acp_table =
+		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+	if (uvd_table->count) {
+		for (i = 0; i < uvd_table->count; i++)
+			uvd_table->entries[i].v =
+				cz_convert_8bit_index_to_voltage(adev,
+						uvd_table->entries[i].v);
+	}
+
+	if (vce_table->count) {
+		for (i = 0; i < vce_table->count; i++)
+			vce_table->entries[i].v =
+				cz_convert_8bit_index_to_voltage(adev,
+						vce_table->entries[i].v);
+	}
+
+	if (acp_table->count) {
+		for (i = 0; i < acp_table->count; i++)
+			acp_table->entries[i].v =
+				cz_convert_8bit_index_to_voltage(adev,
+						acp_table->entries[i].v);
+	}
+
+}
+
+static void cz_construct_boot_state(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+	pi->boot_pl.ds_divider_index = 0;
+	pi->boot_pl.ss_divider_index = 0;
+	pi->boot_pl.allow_gnb_slow = 1;
+	pi->boot_pl.force_nbp_state = 0;
+	pi->boot_pl.display_wm = 0;
+	pi->boot_pl.vce_wm = 0;
+
+}
+
+static void cz_patch_boot_state(struct amdgpu_device *adev,
+				struct cz_ps *ps)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	ps->num_levels = 1;
+	ps->levels[0] = pi->boot_pl;
+}
+
+union pplib_clock_info {
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
+};
+
+static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps, int index,
+					union pplib_clock_info *clock_info)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct cz_ps *ps = cz_get_ps(rps);
+	struct cz_pl *pl = &ps->levels[index];
+	struct amdgpu_clock_voltage_dependency_table *table =
+			&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	pl->sclk = table->entries[clock_info->carrizo.index].clk;
+	pl->vddc_index = table->entries[clock_info->carrizo.index].v;
+
+	ps->num_levels = index + 1;
+
+	if (pi->caps_sclk_ds) {
+		pl->ds_divider_index = 5;
+		pl->ss_divider_index = 5;
+	}
+
+}
+
+static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+			struct amdgpu_ps *rps,
+			struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+			u8 table_rev)
+{
+	struct cz_ps *ps = cz_get_ps(rps);
+
+	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	rps->class = le16_to_cpu(non_clock_info->usClassification);
+	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+	} else {
+		rps->vclk = 0;
+		rps->dclk = 0;
+	}
+
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		adev->pm.dpm.boot_ps = rps;
+		cz_patch_boot_state(adev, ps);
+	}
+	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+		adev->pm.dpm.uvd_ps = rps;
+
+}
+
+union power_info {
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static int cz_parse_power_table(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, k, non_clock_array_index, clock_array_index;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+	struct cz_ps *ps;
+
+	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				    &frev, &crev, &data_offset))
+		return -EINVAL;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+					state_array->ucNumEntries, GFP_KERNEL);
+
+	if (!adev->pm.dpm.ps)
+		return -ENOMEM;
+
+	power_state_offset = (u8 *)state_array->states;
+	adev->pm.dpm.platform_caps =
+			le32_to_cpu(power_info->pplib.ulPlatformCaps);
+	adev->pm.dpm.backbias_response_time =
+			le16_to_cpu(power_info->pplib.usBackbiasTime);
+	adev->pm.dpm.voltage_response_time =
+			le16_to_cpu(power_info->pplib.usVoltageTime);
+
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+
+		ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
+		if (ps == NULL) {
+			kfree(adev->pm.dpm.ps);
+			return -ENOMEM;
+		}
+
+		adev->pm.dpm.ps[i].ps_priv = ps;
+		k = 0;
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = power_state->v2.clockInfoIndex[j];
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
+				break;
+			clock_info = (union pplib_clock_info *)
+				&clock_info_array->clockInfo[clock_array_index *
+				clock_info_array->ucEntrySize];
+			cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
+				k, clock_info);
+			k++;
+		}
+		cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+					non_clock_info,
+					non_clock_info_array->ucEntrySize);
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+	return 0;
+}
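A note on the stride in the loop above: each _ATOM_PPLIB_STATE_V2 entry is variable-length, two header bytes (ucNumDPMLevels and nonClockInfoIndex) followed by one clock-info index byte per DPM level, which is why power_state_offset advances by 2 + ucNumDPMLevels for every state parsed.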
+
+static int cz_process_firmware_header(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	u32 tmp;
+	int ret;
+
+	ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
+				     offsetof(struct SMU8_Firmware_Header,
+				     DpmTable),
+				     &tmp, pi->sram_end);
+
+	if (ret == 0)
+		pi->dpm_table_start = tmp;
+
+	return ret;
+}
+
+static int cz_dpm_init(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi;
+	int ret, i;
+
+	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
+	if (NULL == pi)
+		return -ENOMEM;
+
+	adev->pm.dpm.priv = pi;
+
+	ret = amdgpu_get_platform_caps(adev);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_parse_extended_power_table(adev);
+	if (ret)
+		return ret;
+
+	pi->sram_end = SMC_RAM_END;
+
+	/* set up DPM defaults */
+	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
+		pi->active_target[i] = CZ_AT_DFLT;
+
+	pi->mgcg_cgtt_local0 = 0x0;
+	pi->mgcg_cgtt_local1 = 0x0;
+	pi->clock_slow_down_step = 25000;
+	pi->skip_clock_slow_down = 1;
+	pi->enable_nb_ps_policy = 1;
+	pi->caps_power_containment = true;
+	pi->caps_cac = true;
+	pi->didt_enabled = false;
+	if (pi->didt_enabled) {
+		pi->caps_sq_ramping = true;
+		pi->caps_db_ramping = true;
+		pi->caps_td_ramping = true;
+		pi->caps_tcp_ramping = true;
+	}
+	pi->caps_sclk_ds = true;
+	pi->voting_clients = 0x00c00033;
+	pi->auto_thermal_throttling_enabled = true;
+	pi->bapm_enabled = false;
+	pi->disable_nb_ps3_in_battery = false;
+	pi->voltage_drop_threshold = 0;
+	pi->caps_sclk_throttle_low_notification = false;
+	pi->gfx_pg_threshold = 500;
+	pi->caps_fps = true;
+	/* uvd */
+	pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+	pi->caps_uvd_dpm = true;
+	/* vce */
+	pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+	pi->caps_vce_dpm = true;
+	/* acp */
+	pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+	pi->caps_acp_dpm = true;
+
+	pi->caps_stable_power_state = false;
+	pi->nb_dpm_enabled_by_driver = true;
+	pi->nb_dpm_enabled = false;
+	pi->caps_voltage_island = false;
+	/* flag which indicates that the pptable needs to be uploaded */
+	pi->need_pptable_upload = true;
+
+	ret = cz_parse_sys_info_table(adev);
+	if (ret)
+		return ret;
+
+	cz_patch_voltage_values(adev);
+	cz_construct_boot_state(adev);
+
+	ret = cz_parse_power_table(adev);
+	if (ret)
+		return ret;
+
+	ret = cz_process_firmware_header(adev);
+	if (ret)
+		return ret;
+
+	pi->dpm_enabled = true;
+	pi->uvd_dynamic_pg = false;
+
+	return 0;
+}
+
+static void cz_dpm_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->pm.dpm.num_ps; i++)
+		kfree(adev->pm.dpm.ps[i].ps_priv);
+
+	kfree(adev->pm.dpm.ps);
+	kfree(adev->pm.dpm.priv);
+	amdgpu_free_extended_power_table(adev);
+}
+
+static void
+cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+					       struct seq_file *m)
+{
+	struct amdgpu_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	u32 current_index =
+		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
+		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
+		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
+	u32 sclk, tmp;
+	u16 vddc;
+
+	if (current_index >= NUM_SCLK_LEVELS) {
+		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	} else {
+		sclk = table->entries[current_index].clk;
+		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
+			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
+		vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+		seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
+			   current_index, sclk, vddc);
+	}
+}
+
+static void cz_dpm_print_power_state(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps)
+{
+	int i;
+	struct cz_ps *ps = cz_get_ps(rps);
+
+	amdgpu_dpm_print_class_info(rps->class, rps->class2);
+	amdgpu_dpm_print_cap_info(rps->caps);
+
+	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+	for (i = 0; i < ps->num_levels; i++) {
+		struct cz_pl *pl = &ps->levels[i];
+
+		DRM_INFO("\t\tpower level %d    sclk: %u vddc: %u\n",
+		       i, pl->sclk,
+		       cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
+	}
+
+	amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static void cz_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int cz_dpm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cz_dpm_set_funcs(adev);
+
+	return 0;
+}
+
+
+static int cz_dpm_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* powerdown unused blocks for now */
+	cz_dpm_powergate_uvd(adev, true);
+
+	return 0;
+}
+
+static int cz_dpm_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 0;
+	/* TODO: add thermal support */
+
+	/* default to balanced state */
+	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+	adev->pm.default_sclk = adev->clock.default_sclk;
+	adev->pm.default_mclk = adev->clock.default_mclk;
+	adev->pm.current_sclk = adev->clock.default_sclk;
+	adev->pm.current_mclk = adev->clock.default_mclk;
+	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (amdgpu_dpm == 0)
+		return 0;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = cz_dpm_init(adev);
+	if (ret)
+		goto dpm_init_failed;
+
+	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+	if (amdgpu_dpm == 1)
+		amdgpu_pm_print_power_states(adev);
+
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		goto dpm_init_failed;
+
+	mutex_unlock(&adev->pm.mutex);
+	DRM_INFO("amdgpu: dpm initialized\n");
+
+	return 0;
+
+dpm_init_failed:
+	cz_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	DRM_ERROR("amdgpu: dpm initialization failed\n");
+
+	return ret;
+}
+
+static int cz_dpm_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+	amdgpu_pm_sysfs_fini(adev);
+	cz_dpm_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+
+	return 0;
+}
+
+static void cz_reset_ap_mask(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	pi->active_process_mask = 0;
+
+}
+
+static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
+							void **table)
+{
+	int ret = 0;
+
+	ret = cz_smu_download_pptable(adev, table);
+
+	return ret;
+}
+
+static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct SMU8_Fusion_ClkTable *clock_table;
+	struct atom_clock_dividers dividers;
+	void *table = NULL;
+	uint8_t i = 0;
+	int ret = 0;
+
+	struct amdgpu_clock_voltage_dependency_table *vddc_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
+		&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
+	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	struct amdgpu_clock_voltage_dependency_table *acp_table =
+		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+
+	if (!pi->need_pptable_upload)
+		return 0;
+
+	ret = cz_dpm_download_pptable_from_smu(adev, &table);
+	if (ret) {
+		DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
+		return -EINVAL;
+	}
+
+	clock_table = (struct SMU8_Fusion_ClkTable *)table;
+	/* patch clock table */
+	if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+			vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+			uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+			vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
+			acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
+		DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
+
+		/* vddc sclk */
+		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
+			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
+		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
+		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+				clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
+				false, &dividers);
+		if (ret)
+			return ret;
+		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
+						(uint8_t)dividers.post_divider;
+
+		/* vddgfx sclk */
+		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;
+
+		/* acp breakdown */
+		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
+		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < acp_table->count) ? acp_table->entries[i].clk : 0;
+		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+				clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
+				false, &dividers);
+		if (ret)
+			return ret;
+		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
+						(uint8_t)dividers.post_divider;
+
+		/* uvd breakdown */
+		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
+		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
+		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+				clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
+				false, &dividers);
+		if (ret)
+			return ret;
+		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
+						(uint8_t)dividers.post_divider;
+
+		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
+		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
+		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+				clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
+				false, &dividers);
+		if (ret)
+			return ret;
+		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
+						(uint8_t)dividers.post_divider;
+
+		/* vce breakdown */
+		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
+			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
+		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
+			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
+		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+				clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
+				false, &dividers);
+		if (ret)
+			return ret;
+		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
+						(uint8_t)dividers.post_divider;
+	}
+
+	/* it's time to upload to the SMU */
+	ret = cz_smu_upload_pptable(adev);
+	if (ret) {
+		DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
+		return ret;
+	}
+
+	return 0;
+}
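The loop above patches the downloaded SMU8_Fusion_ClkTable in place: for each hardware power level it copies the voltage id and frequency from the matching dependency table (zero once past that table's count), then asks AtomBIOS for clock dividers so the DFS divider id matches the frequency, before the table is handed back via cz_smu_upload_pptable(). One apparent quirk worth flagging: the ACP entry computes its dividers from SclkBreakdownTable's frequency rather than AclkBreakdownTable's; whether that is intentional is not clear from the patch.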
+
+static void cz_init_sclk_limit(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	uint32_t clock = 0, level;
+
+	if (!table || !table->count) {
+		DRM_ERROR("Invalid Voltage Dependency table.\n");
+		return;
+	}
+
+	pi->sclk_dpm.soft_min_clk = 0;
+	pi->sclk_dpm.hard_min_clk = 0;
+	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
+	level = cz_get_argument(adev);
+	if (level < table->count)
+		clock = table->entries[level].clk;
+	else {
+		DRM_ERROR("Invalid SCLK Voltage Dependency table entry.\n");
+		clock = table->entries[table->count - 1].clk;
+	}
+
+	pi->sclk_dpm.soft_max_clk = clock;
+	pi->sclk_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_uvd_limit(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_uvd_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	uint32_t clock = 0, level;
+
+	if (!table || !table->count) {
+		DRM_ERROR("Invalid Voltage Dependency table.\n");
+		return;
+	}
+
+	pi->uvd_dpm.soft_min_clk = 0;
+	pi->uvd_dpm.hard_min_clk = 0;
+	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
+	level = cz_get_argument(adev);
+	if (level < table->count)
+		clock = table->entries[level].vclk;
+	else {
+		DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
+		clock = table->entries[table->count - 1].vclk;
+	}
+
+	pi->uvd_dpm.soft_max_clk = clock;
+	pi->uvd_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_vce_limit(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	uint32_t clock = 0, level;
+
+	if (!table || !table->count) {
+		DRM_ERROR("Invalid Voltage Dependency table.\n");
+		return;
+	}
+
+	pi->vce_dpm.soft_min_clk = 0;
+	pi->vce_dpm.hard_min_clk = 0;
+	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
+	level = cz_get_argument(adev);
+	if (level < table->count)
+		clock = table->entries[level].evclk;
+	else {
+		/* a future BIOS should fix this error */
+		DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
+		clock = table->entries[table->count - 1].evclk;
+	}
+
+	pi->vce_dpm.soft_max_clk = clock;
+	pi->vce_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_acp_limit(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+	uint32_t clock = 0, level;
+
+	if (!table || !table->count) {
+		DRM_ERROR("Invalid Voltage Dependency table.\n");
+		return;
+	}
+
+	pi->acp_dpm.soft_min_clk = 0;
+	pi->acp_dpm.hard_min_clk = 0;
+	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
+	level = cz_get_argument(adev);
+	if (level < table->count)
+		clock = table->entries[level].clk;
+	else {
+		DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
+		clock = table->entries[table->count - 1].clk;
+	}
+
+	pi->acp_dpm.soft_max_clk = clock;
+	pi->acp_dpm.hard_max_clk = clock;
+
+}
+
+static void cz_init_pg_state(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	pi->uvd_power_gated = false;
+	pi->vce_power_gated = false;
+	pi->acp_power_gated = false;
+
+}
+
+static void cz_init_sclk_threshold(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	pi->low_sclk_interrupt_threshold = 0;
+
+}
+
+static void cz_dpm_setup_asic(struct amdgpu_device *adev)
+{
+	cz_reset_ap_mask(adev);
+	cz_dpm_upload_pptable_to_smu(adev);
+	cz_init_sclk_limit(adev);
+	cz_init_uvd_limit(adev);
+	cz_init_vce_limit(adev);
+	cz_init_acp_limit(adev);
+	cz_init_pg_state(adev);
+	cz_init_sclk_threshold(adev);
+
+}
+
+static bool cz_check_smu_feature(struct amdgpu_device *adev,
+				uint32_t feature)
+{
+	uint32_t smu_feature = 0;
+	int ret;
+
+	ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_GetFeatureStatus, 0);
+	if (ret) {
+		DRM_ERROR("Failed to get SMU features from SMC.\n");
+		return false;
+	} else {
+		smu_feature = cz_get_argument(adev);
+		if (feature & smu_feature)
+			return true;
+	}
+
+	return false;
+}
+
+static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
+{
+	if (cz_check_smu_feature(adev,
+				SMU_EnabledFeatureScoreboard_SclkDpmOn))
+		return true;
+
+	return false;
+}
+
+static void cz_program_voting_clients(struct amdgpu_device *adev)
+{
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
+}
+
+static void cz_clear_voting_clients(struct amdgpu_device *adev)
+{
+	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int cz_start_dpm(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	if (amdgpu_dpm) {
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
+		if (ret) {
+			DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int cz_stop_dpm(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	if (amdgpu_dpm && adev->pm.dpm_enabled) {
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
+		if (ret) {
+			DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
+				uint32_t clock, uint16_t msg)
+{
+	int i = 0;
+	struct amdgpu_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+	switch (msg) {
+	case PPSMC_MSG_SetSclkSoftMin:
+	case PPSMC_MSG_SetSclkHardMin:
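+		/* pick the lowest level whose clock is >= the requested clock */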
+		for (i = 0; i < table->count; i++)
+			if (clock <= table->entries[i].clk)
+				break;
+		if (i == table->count)
+			i = table->count - 1;
+		break;
+	case PPSMC_MSG_SetSclkSoftMax:
+	case PPSMC_MSG_SetSclkHardMax:
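+		/* pick the highest level whose clock is <= the requested clock */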
+		for (i = table->count - 1; i >= 0; i--)
+			if (clock >= table->entries[i].clk)
+				break;
+		if (i < 0)
+			i = 0;
+		break;
+	default:
+		break;
+	}
+
+	return i;
+}
+
+static int cz_program_bootup_state(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	uint32_t soft_min_clk = 0;
+	uint32_t soft_max_clk = 0;
+	int ret = 0;
+
+	pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
+	pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;
+
+	soft_min_clk = cz_get_sclk_level(adev,
+				pi->sclk_dpm.soft_min_clk,
+				PPSMC_MSG_SetSclkSoftMin);
+	soft_max_clk = cz_get_sclk_level(adev,
+				pi->sclk_dpm.soft_max_clk,
+				PPSMC_MSG_SetSclkSoftMax);
+
+	ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
+	if (ret)
+		return -EINVAL;
+
+	ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* TODO */
+static int cz_disable_cgpg(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+/* TODO */
+static int cz_enable_cgpg(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+/* TODO */
+static int cz_program_pt_config_registers(struct amdgpu_device *adev)
+{
+	return 0;
+}
+
+static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	uint32_t reg = 0;
+
+	if (pi->caps_sq_ramping) {
+		reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
+		if (enable)
+			reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
+		else
+			reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
+		WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);
+	}
+	if (pi->caps_db_ramping) {
+		reg = RREG32_DIDT(ixDIDT_DB_CTRL0);
+		if (enable)
+			reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1);
+		else
+			reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0);
+		WREG32_DIDT(ixDIDT_DB_CTRL0, reg);
+	}
+	if (pi->caps_td_ramping) {
+		reg = RREG32_DIDT(ixDIDT_TD_CTRL0);
+		if (enable)
+			reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1);
+		else
+			reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0);
+		WREG32_DIDT(ixDIDT_TD_CTRL0, reg);
+	}
+	if (pi->caps_tcp_ramping) {
+		reg = RREG32_DIDT(ixDIDT_TCP_CTRL0);
+		if (enable)
+			reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1);
+		else
+			reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0);
+		WREG32_DIDT(ixDIDT_TCP_CTRL0, reg);
+	}
+
+}
+
+static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	int ret;
+
+	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+			pi->caps_td_ramping || pi->caps_tcp_ramping) {
+		if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
+			ret = cz_disable_cgpg(adev);
+			if (ret) {
+				DRM_ERROR("Pre Di/Dt disable cg/pg failed\n");
+				return -EINVAL;
+			}
+			adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE;
+		}
+
+		ret = cz_program_pt_config_registers(adev);
+		if (ret) {
+			DRM_ERROR("Di/Dt config failed\n");
+			return -EINVAL;
+		}
+		cz_do_enable_didt(adev, enable);
+
+		if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) {
+			ret = cz_enable_cgpg(adev);
+			if (ret) {
+				DRM_ERROR("Post Di/Dt enable cg/pg failed\n");
+				return -EINVAL;
+			}
+			adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+		}
+	}
+
+	return 0;
+}
+
+/* TODO */
+static void cz_reset_acp_boot_level(struct amdgpu_device *adev)
+{
+}
+
+static void cz_update_current_ps(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct cz_ps *ps = cz_get_ps(rps);
+
+	pi->current_ps = *ps;
+	pi->current_rps = *rps;
+	pi->current_rps.ps_priv = ps;
+
+}
+
+static void cz_update_requested_ps(struct amdgpu_device *adev,
+					struct amdgpu_ps *rps)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct cz_ps *ps = cz_get_ps(rps);
+
+	pi->requested_ps = *ps;
+	pi->requested_rps = *rps;
+	pi->requested_rps.ps_priv = ps;
+
+}
+
+/* PP arbiter support needed TODO */
+static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
+					struct amdgpu_ps *new_rps,
+					struct amdgpu_ps *old_rps)
+{
+	struct cz_ps *ps = cz_get_ps(new_rps);
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_clock_and_voltage_limits *limits =
+		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	/* 10kHz memory clock */
+	uint32_t mclk = 0;
+
+	ps->force_high = false;
+	ps->need_dfs_bypass = true;
+	pi->video_start = new_rps->dclk || new_rps->vclk ||
+				new_rps->evclk || new_rps->ecclk;
+
+	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+			ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+		pi->battery_state = true;
+	else
+		pi->battery_state = false;
+
+	if (pi->caps_stable_power_state)
+		mclk = limits->mclk;
+
+	if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1])
+		ps->force_high = true;
+
+}
+
+static int cz_dpm_enable(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	/* re-enabling would hang the SMU, so check whether DPM is already on first */
+	if (cz_check_for_dpm_enabled(adev))
+		return -EINVAL;
+
+	cz_program_voting_clients(adev);
+
+	ret = cz_start_dpm(adev);
+	if (ret) {
+		DRM_ERROR("Carrizo DPM enable failed\n");
+		return -EINVAL;
+	}
+
+	ret = cz_program_bootup_state(adev);
+	if (ret) {
+		DRM_ERROR("Carrizo bootup state program failed\n");
+		return -EINVAL;
+	}
+
+	ret = cz_enable_didt(adev, true);
+	if (ret) {
+		DRM_ERROR("Carrizo enable di/dt failed\n");
+		return -EINVAL;
+	}
+
+	cz_reset_acp_boot_level(adev);
+
+	cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+static int cz_dpm_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret = 0;
+
+	mutex_lock(&adev->pm.mutex);
+
+	/* init smc in dpm hw init */
+	ret = cz_smu_init(adev);
+	if (ret) {
+		DRM_ERROR("amdgpu: smc initialization failed\n");
+		mutex_unlock(&adev->pm.mutex);
+		return ret;
+	}
+
+	/* do the actual fw loading */
+	ret = cz_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("amdgpu: smc start failed\n");
+		mutex_unlock(&adev->pm.mutex);
+		return ret;
+	}
+
+	if (!amdgpu_dpm) {
+		adev->pm.dpm_enabled = false;
+		mutex_unlock(&adev->pm.mutex);
+		return ret;
+	}
+
+	/* cz dpm setup asic */
+	cz_dpm_setup_asic(adev);
+
+	/* cz dpm enable */
+	ret = cz_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+
+	mutex_unlock(&adev->pm.mutex);
+
+	return 0;
+}
+
+static int cz_dpm_disable(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	if (!cz_check_for_dpm_enabled(adev))
+		return -EINVAL;
+
+	ret = cz_enable_didt(adev, false);
+	if (ret) {
+		DRM_ERROR("Carrizo disable di/dt failed\n");
+		return -EINVAL;
+	}
+
+	/* powerup blocks */
+	cz_dpm_powergate_uvd(adev, false);
+
+	cz_clear_voting_clients(adev);
+	cz_stop_dpm(adev);
+	cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
+
+	return 0;
+}
+
+static int cz_dpm_hw_fini(void *handle)
+{
+	int ret = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+
+	cz_smu_fini(adev);
+
+	if (adev->pm.dpm_enabled) {
+		ret = cz_dpm_disable(adev);
+
+		adev->pm.dpm.current_ps =
+			adev->pm.dpm.requested_ps =
+			adev->pm.dpm.boot_ps;
+	}
+
+	adev->pm.dpm_enabled = false;
+
+	mutex_unlock(&adev->pm.mutex);
+
+	return ret;
+}
+
+static int cz_dpm_suspend(void *handle)
+{
+	int ret = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+
+		ret = cz_dpm_disable(adev);
+
+		adev->pm.dpm.current_ps =
+			adev->pm.dpm.requested_ps =
+			adev->pm.dpm.boot_ps;
+
+		mutex_unlock(&adev->pm.mutex);
+	}
+
+	return ret;
+}
+
+static int cz_dpm_resume(void *handle)
+{
+	int ret = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+	ret = cz_smu_init(adev);
+	if (ret) {
+		DRM_ERROR("amdgpu: smc resume failed\n");
+		mutex_unlock(&adev->pm.mutex);
+		return ret;
+	}
+
+	/* do the actual fw loading */
+	ret = cz_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("amdgpu: smc start failed\n");
+		mutex_unlock(&adev->pm.mutex);
+		return ret;
+	}
+
+	if (!amdgpu_dpm) {
+		adev->pm.dpm_enabled = false;
+		mutex_unlock(&adev->pm.mutex);
+		return ret;
+	}
+
+	/* cz dpm setup asic */
+	cz_dpm_setup_asic(adev);
+
+	/* cz dpm enable */
+	ret = cz_dpm_enable(adev);
+	if (ret)
+		adev->pm.dpm_enabled = false;
+	else
+		adev->pm.dpm_enabled = true;
+
+	mutex_unlock(&adev->pm.mutex);
+	/* upon resume, re-compute the clocks */
+	if (adev->pm.dpm_enabled)
+		amdgpu_pm_compute_clocks(adev);
+
+	return 0;
+}
+
+static int cz_dpm_set_clockgating_state(void *handle,
+					enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int cz_dpm_set_powergating_state(void *handle,
+					enum amd_powergating_state state)
+{
+	return 0;
+}
+
+/* borrowed from KV, needs to be unified with it in the future */
+static int cz_dpm_get_temperature(struct amdgpu_device *adev)
+{
+	int actual_temp = 0;
+	uint32_t temp = RREG32_SMC(0xC0300E0C);
+
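+	/*
+	 * The raw value encodes the temperature in 1/8 degree steps with a
+	 * 49 degree offset; the result is returned in millidegrees C, e.g.
+	 * a raw reading of 680 works out to (680 / 8 - 49) * 1000 = 36000,
+	 * i.e. 36 C.
+	 */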
+	if (temp)
+		actual_temp = 1000 * ((temp / 8) - 49);
+
+	return actual_temp;
+}
+
+static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+	struct amdgpu_ps *new_ps = &requested_ps;
+
+	cz_update_requested_ps(adev, new_ps);
+	cz_apply_state_adjust_rules(adev, &pi->requested_rps,
+					&pi->current_rps);
+
+	return 0;
+}
+
+static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_clock_and_voltage_limits *limits =
+		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+	uint32_t clock, stable_ps_clock = 0;
+
+	clock = pi->sclk_dpm.soft_min_clk;
+
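+	/* with the stable power state cap set, raise the soft minimum
+	 * to at least 75% of the maximum AC sclk */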
+	if (pi->caps_stable_power_state) {
+		stable_ps_clock = limits->sclk * 75 / 100;
+		if (clock < stable_ps_clock)
+			clock = stable_ps_clock;
+	}
+
+	if (clock != pi->sclk_dpm.soft_min_clk) {
+		pi->sclk_dpm.soft_min_clk = clock;
+		cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(adev, clock,
+					PPSMC_MSG_SetSclkSoftMin));
+	}
+
+	if (pi->caps_stable_power_state &&
+			pi->sclk_dpm.soft_max_clk != clock) {
+		pi->sclk_dpm.soft_max_clk = clock;
+		cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(adev, clock,
+					PPSMC_MSG_SetSclkSoftMax));
+	} else {
+		cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(adev,
+					pi->sclk_dpm.soft_max_clk,
+					PPSMC_MSG_SetSclkSoftMax));
+	}
+
+	return 0;
+}
+
+static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	if (pi->caps_sclk_ds) {
+		cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetMinDeepSleepSclk,
+				CZ_MIN_DEEP_SLEEP_SCLK);
+	}
+
+	return ret;
+}
+
+/* ?? without DAL support, is this still needed in the set_power_state sequence? */
+static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	cz_send_msg_to_smc_with_parameter(adev,
+			PPSMC_MSG_SetWatermarkFrequency,
+			pi->sclk_dpm.soft_max_clk);
+
+	return ret;
+}
+
+static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	/* also depends on DAL's NBPStateDisableRequired */
+	if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) {
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_EnableAllSmuFeatures,
+				NB_DPM_MASK);
+		if (ret) {
+			DRM_ERROR("amdgpu: nb dpm enable failed\n");
+			return ret;
+		}
+		pi->nb_dpm_enabled = true;
+	}
+
+	return ret;
+}
+
+static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
+							bool enable)
+{
+	if (enable)
+		cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate);
+	else
+		cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate);
+
+}
+
+static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct cz_ps *ps = &pi->requested_ps;
+
+	if (pi->sys_info.nb_dpm_enable) {
+		if (ps->force_high)
+			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
+		else
+			cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+	}
+
+	return ret;
+}
+
+/* with dpm enabled */
+static int cz_dpm_set_power_state(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	cz_dpm_update_sclk_limit(adev);
+	cz_dpm_set_deep_sleep_sclk_threshold(adev);
+	cz_dpm_set_watermark_threshold(adev);
+	cz_dpm_enable_nbdpm(adev);
+	cz_dpm_update_low_memory_pstate(adev);
+
+	return ret;
+}
+
+static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_ps *ps = &pi->requested_rps;
+
+	cz_update_current_ps(adev, ps);
+
+}
+
+static int cz_dpm_force_highest(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	int ret = 0;
+
+	if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) {
+		pi->sclk_dpm.soft_min_clk =
+			pi->sclk_dpm.soft_max_clk;
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(adev,
+					pi->sclk_dpm.soft_min_clk,
+					PPSMC_MSG_SetSclkSoftMin));
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int cz_dpm_force_lowest(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	int ret = 0;
+
+	if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) {
+		pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk;
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(adev,
+					pi->sclk_dpm.soft_max_clk,
+					PPSMC_MSG_SetSclkSoftMax));
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	if (!pi->max_sclk_level) {
+		cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
+		pi->max_sclk_level = cz_get_argument(adev) + 1;
+	}
+
+	if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) {
+		DRM_ERROR("Invalid max sclk level!\n");
+		return -EINVAL;
+	}
+
+	return pi->max_sclk_level;
+}
+
+static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct amdgpu_clock_voltage_dependency_table *dep_table =
+		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+	uint32_t level = 0;
+	int ret = 0;
+
+	pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk;
+	level = cz_dpm_get_max_sclk_level(adev) - 1;
+	if (level < dep_table->count)
+		pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk;
+	else
+		pi->sclk_dpm.soft_max_clk =
+			dep_table->entries[dep_table->count - 1].clk;
+
+	/* get the min/max sclk soft values
+	 * and notify the SMU to apply them */
+	ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMin,
+				cz_get_sclk_level(adev,
+					pi->sclk_dpm.soft_min_clk,
+					PPSMC_MSG_SetSclkSoftMin));
+	if (ret)
+		return ret;
+
+	ret = cz_send_msg_to_smc_with_parameter(adev,
+				PPSMC_MSG_SetSclkSoftMax,
+				cz_get_sclk_level(adev,
+					pi->sclk_dpm.soft_max_clk,
+					PPSMC_MSG_SetSclkSoftMax));
+	if (ret)
+		return ret;
+
+	DRM_INFO("DPM unforce state min=%d, max=%d.\n",
+			pi->sclk_dpm.soft_min_clk,
+			pi->sclk_dpm.soft_max_clk);
+
+	return 0;
+}
+
+static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
+				enum amdgpu_dpm_forced_level level)
+{
+	int ret = 0;
+
+	switch (level) {
+	case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+		ret = cz_dpm_force_highest(adev);
+		if (ret)
+			return ret;
+		break;
+	case AMDGPU_DPM_FORCED_LEVEL_LOW:
+		ret = cz_dpm_force_lowest(adev);
+		if (ret)
+			return ret;
+		break;
+	case AMDGPU_DPM_FORCED_LEVEL_AUTO:
+		ret = cz_dpm_unforce_dpm_levels(adev);
+		if (ret)
+			return ret;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* FIXME: handle display configuration changes here;
+ * this is mostly DAL related */
+static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+}
+
+static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps);
+
+	if (low)
+		return requested_state->levels[0].sclk;
+	else
+		return requested_state->levels[requested_state->num_levels - 1].sclk;
+
+}
+
+static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+
+	return pi->sys_info.bootup_uma_clk;
+}
+
+static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	int ret = 0;
+
+	if (enable && pi->caps_uvd_dpm) {
+		pi->dpm_flags |= DPMFlags_UVD_Enabled;
+		DRM_DEBUG("UVD DPM Enabled.\n");
+
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+			PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK);
+	} else {
+		pi->dpm_flags &= ~DPMFlags_UVD_Enabled;
+		DRM_DEBUG("UVD DPM Stopped\n");
+
+		ret = cz_send_msg_to_smc_with_parameter(adev,
+			PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK);
+	}
+
+	return ret;
+}
+
+static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
+{
+	return cz_enable_uvd_dpm(adev, !gate);
+}
+
+
+static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+{
+	struct cz_power_info *pi = cz_get_pi(adev);
+	int ret;
+
+	if (pi->uvd_power_gated == gate)
+		return;
+
+	pi->uvd_power_gated = gate;
+
+	if (gate) {
+		if (pi->caps_uvd_pg) {
+			/* disable clockgating so we can properly shut down the block */
+			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_CG_STATE_UNGATE);
+			/* shutdown the UVD block */
+			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_PG_STATE_GATE);
+			/* XXX: check for errors */
+		}
+		cz_update_uvd_dpm(adev, gate);
+		if (pi->caps_uvd_pg)
+			/* power off the UVD block */
+			cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+	} else {
+		if (pi->caps_uvd_pg) {
+			/* power on the UVD block */
+			if (pi->uvd_dynamic_pg)
+				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+			else
+				cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+			/* re-init the UVD block */
+			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_PG_STATE_UNGATE);
+			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
+			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+							    AMD_CG_STATE_GATE);
+			/* XXX: check for errors */
+		}
+		cz_update_uvd_dpm(adev, gate);
+	}
+}
+
+const struct amd_ip_funcs cz_dpm_ip_funcs = {
+	.early_init = cz_dpm_early_init,
+	.late_init = cz_dpm_late_init,
+	.sw_init = cz_dpm_sw_init,
+	.sw_fini = cz_dpm_sw_fini,
+	.hw_init = cz_dpm_hw_init,
+	.hw_fini = cz_dpm_hw_fini,
+	.suspend = cz_dpm_suspend,
+	.resume = cz_dpm_resume,
+	.is_idle = NULL,
+	.wait_for_idle = NULL,
+	.soft_reset = NULL,
+	.print_status = NULL,
+	.set_clockgating_state = cz_dpm_set_clockgating_state,
+	.set_powergating_state = cz_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs cz_dpm_funcs = {
+	.get_temperature = cz_dpm_get_temperature,
+	.pre_set_power_state = cz_dpm_pre_set_power_state,
+	.set_power_state = cz_dpm_set_power_state,
+	.post_set_power_state = cz_dpm_post_set_power_state,
+	.display_configuration_changed = cz_dpm_display_configuration_changed,
+	.get_sclk = cz_dpm_get_sclk,
+	.get_mclk = cz_dpm_get_mclk,
+	.print_power_state = cz_dpm_print_power_state,
+	.debugfs_print_current_performance_level =
+				cz_dpm_debugfs_print_current_performance_level,
+	.force_performance_level = cz_dpm_force_dpm_level,
+	.vblank_too_short = NULL,
+	.powergate_uvd = cz_dpm_powergate_uvd,
+};
+
+static void cz_dpm_set_funcs(struct amdgpu_device *adev)
+{
+	if (adev->pm.funcs == NULL)
+		adev->pm.funcs = &cz_dpm_funcs;
+}

+ 237 - 0
drivers/gpu/drm/amd/amdgpu/cz_dpm.h

@@ -0,0 +1,237 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CZ_DPM_H__
+#define __CZ_DPM_H__
+
+#include "smu8_fusion.h"
+
+#define CZ_AT_DFLT					30
+#define CZ_NUM_NBPSTATES				4
+#define CZ_NUM_NBPMEMORY_CLOCK				2
+#define CZ_MAX_HARDWARE_POWERLEVELS			8
+#define CZ_MAX_DISPLAY_CLOCK_LEVEL			8
+#define CZ_MAX_DISPLAYPHY_IDS				10
+
+#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0			0x3FFFC102
+
+#define SMC_RAM_END					0x40000
+
+#define DPMFlags_SCLK_Enabled				0x00000001
+#define DPMFlags_UVD_Enabled				0x00000002
+#define DPMFlags_VCE_Enabled				0x00000004
+#define DPMFlags_ACP_Enabled				0x00000008
+#define DPMFlags_ForceHighestValid			0x40000000
+#define DPMFlags_Debug					0x80000000
+
+/* Do not change the following, it is also defined in SMU8.h */
+#define SMU_EnabledFeatureScoreboard_AcpDpmOn		0x00000001
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn		0x00100000
+#define SMU_EnabledFeatureScoreboard_UvdDpmOn		0x00800000
+#define SMU_EnabledFeatureScoreboard_VceDpmOn		0x01000000
+
+/* temporary value for SetMinDeepSleepSclk;
+ * should really be supplied by the display adapter.
+ * Units of 10 kHz (800 = 8 MHz). */
+#define CZ_MIN_DEEP_SLEEP_SCLK				800
+
+enum cz_pt_config_reg_type {
+	CZ_CONFIGREG_MMR = 0,
+	CZ_CONFIGREG_SMC_IND,
+	CZ_CONFIGREG_DIDT_IND,
+	CZ_CONFIGREG_CACHE,
+	CZ_CONFIGREG_MAX
+};
+
+struct cz_pt_config_reg {
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t shift;
+	uint32_t value;
+	enum cz_pt_config_reg_type type;
+};
+
+struct cz_dpm_entry {
+	uint32_t	soft_min_clk;
+	uint32_t	hard_min_clk;
+	uint32_t	soft_max_clk;
+	uint32_t	hard_max_clk;
+};
+
+struct cz_pl {
+	uint32_t sclk;
+	uint8_t vddc_index;
+	uint8_t ds_divider_index;
+	uint8_t ss_divider_index;
+	uint8_t allow_gnb_slow;
+	uint8_t force_nbp_state;
+	uint8_t display_wm;
+	uint8_t vce_wm;
+};
+
+struct cz_ps {
+	struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS];
+	uint32_t num_levels;
+	bool need_dfs_bypass;
+	uint8_t dpm0_pg_nb_ps_lo;
+	uint8_t dpm0_pg_nb_ps_hi;
+	uint8_t dpmx_nb_ps_lo;
+	uint8_t dpmx_nb_ps_hi;
+	bool force_high;
+};
+
+struct cz_displayphy_entry {
+	uint8_t phy_present;
+	uint8_t active_lane_mapping;
+	uint8_t display_conf_type;
+	uint8_t num_active_lanes;
+};
+
+struct cz_displayphy_info {
+	bool phy_access_initialized;
+	struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS];
+};
+
+struct cz_sys_info {
+	uint32_t bootup_uma_clk;
+	uint32_t bootup_sclk;
+	uint32_t dentist_vco_freq;
+	uint32_t nb_dpm_enable;
+	uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK];
+	uint32_t nbp_n_clock[CZ_NUM_NBPSTATES];
+	uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES];
+	uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL];
+	uint16_t bootup_nb_voltage_index;
+	uint8_t htc_tmp_lmt;
+	uint8_t htc_hyst_lmt;
+	uint32_t uma_channel_number;
+};
+
+struct cz_power_info {
+	uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS];
+	struct cz_sys_info sys_info;
+	struct cz_pl boot_pl;
+	bool disable_nb_ps3_in_battery;
+	bool battery_state;
+	uint32_t lowest_valid;
+	uint32_t highest_valid;
+	uint16_t high_voltage_threshold;
+	/* smc offsets */
+	uint32_t sram_end;
+	uint32_t dpm_table_start;
+	uint32_t soft_regs_start;
+	/* dpm SMU tables */
+	uint8_t uvd_level_count;
+	uint8_t vce_level_count;
+	uint8_t acp_level_count;
+	uint32_t fps_high_threshold;
+	uint32_t fps_low_threshold;
+	/* dpm table */
+	uint32_t dpm_flags;
+	struct cz_dpm_entry sclk_dpm;
+	struct cz_dpm_entry uvd_dpm;
+	struct cz_dpm_entry vce_dpm;
+	struct cz_dpm_entry acp_dpm;
+
+	uint8_t uvd_boot_level;
+	uint8_t uvd_interval;
+	uint8_t vce_boot_level;
+	uint8_t vce_interval;
+	uint8_t acp_boot_level;
+	uint8_t acp_interval;
+
+	uint8_t graphics_boot_level;
+	uint8_t graphics_interval;
+	uint8_t graphics_therm_throttle_enable;
+	uint8_t graphics_voltage_change_enable;
+	uint8_t graphics_clk_slow_enable;
+	uint8_t graphics_clk_slow_divider;
+
+	uint32_t low_sclk_interrupt_threshold;
+	bool uvd_power_gated;
+	bool vce_power_gated;
+	bool acp_power_gated;
+
+	uint32_t active_process_mask;
+
+	uint32_t mgcg_cgtt_local0;
+	uint32_t mgcg_cgtt_local1;
+	uint32_t clock_slow_down_step;
+	uint32_t skip_clock_slow_down;
+	bool enable_nb_ps_policy;
+	uint32_t voting_clients;
+	uint32_t voltage_drop_threshold;
+	uint32_t gfx_pg_threshold;
+	uint32_t max_sclk_level;
+	/* flags */
+	bool didt_enabled;
+	bool video_start;
+	bool cac_enabled;
+	bool bapm_enabled;
+	bool nb_dpm_enabled_by_driver;
+	bool nb_dpm_enabled;
+	bool auto_thermal_throttling_enabled;
+	bool dpm_enabled;
+	bool need_pptable_upload;
+	/* caps */
+	bool caps_cac;
+	bool caps_power_containment;
+	bool caps_sq_ramping;
+	bool caps_db_ramping;
+	bool caps_td_ramping;
+	bool caps_tcp_ramping;
+	bool caps_sclk_throttle_low_notification;
+	bool caps_fps;
+	bool caps_uvd_dpm;
+	bool caps_uvd_pg;
+	bool caps_vce_dpm;
+	bool caps_vce_pg;
+	bool caps_acp_dpm;
+	bool caps_acp_pg;
+	bool caps_stable_power_state;
+	bool caps_enable_dfs_bypass;
+	bool caps_sclk_ds;
+	bool caps_voltage_island;
+	/* power state */
+	struct amdgpu_ps current_rps;
+	struct cz_ps current_ps;
+	struct amdgpu_ps requested_rps;
+	struct cz_ps requested_ps;
+
+	bool uvd_power_down;
+	bool vce_power_down;
+	bool acp_power_down;
+
+	bool uvd_dynamic_pg;
+};
+
+/* cz_smc.c */
+uint32_t cz_get_argument(struct amdgpu_device *adev);
+int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg);
+int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+			uint16_t msg, uint32_t parameter);
+int cz_read_smc_sram_dword(struct amdgpu_device *adev,
+			uint32_t smc_address, uint32_t *value, uint32_t limit);
+int cz_smu_upload_pptable(struct amdgpu_device *adev);
+int cz_smu_download_pptable(struct amdgpu_device *adev, void **table);
+#endif

+ 452 - 0
drivers/gpu/drm/amd/amdgpu/cz_ih.c

@@ -0,0 +1,452 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "vid.h"
+
+#include "oss/oss_3_0_1_d.h"
+#include "oss/oss_3_0_1_sh_mask.h"
+
+#include "bif/bif_5_1_d.h"
+#include "bif/bif_5_1_sh_mask.h"
+
+/*
+ * Interrupts
+ * Starting with r6xx, interrupts are handled via a ring buffer.
+ * Ring buffers are areas of GPU accessible memory that the GPU
+ * writes interrupt vectors into and the host reads vectors out of.
+ * There is a rptr (read pointer) that determines where the
+ * host is currently reading, and a wptr (write pointer)
+ * which determines where the GPU has written.  When the
+ * pointers are equal, the ring is idle.  When the GPU
+ * writes vectors to the ring buffer, it increments the
+ * wptr.  When there is an interrupt, the host then starts
+ * fetching commands and processing them until the pointers are
+ * equal again at which point it updates the rptr.
+ */
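+
+/*
+ * Illustrative sketch (not part of this patch): a generic consumer of such
+ * a ring of 16-byte vectors.  The helpers read_rptr(), read_wptr(),
+ * process_vector() and write_rptr() are hypothetical; the real flow is
+ * amdgpu_irq_process() driving the cz_ih_get_wptr()/cz_ih_decode_iv()/
+ * cz_ih_set_rptr() callbacks below.
+ *
+ *	static void example_drain_ih_ring(volatile u32 *ring, u32 ptr_mask)
+ *	{
+ *		u32 rptr = read_rptr();		// where the host stopped reading
+ *		u32 wptr = read_wptr();		// where the GPU has written up to
+ *
+ *		while (rptr != wptr) {		// equal pointers: ring is idle
+ *			process_vector(&ring[rptr / 4]);  // one 16-byte vector
+ *			rptr = (rptr + 16) & ptr_mask;    // wrap at the ring size
+ *		}
+ *		write_rptr(rptr);		// hand the consumed entries back
+ *	}
+ */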
+
+static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * cz_ih_enable_interrupts - Enable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enable the interrupt ring buffer (VI).
+ */
+static void cz_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_cntl = RREG32(mmIH_CNTL);
+	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+
+	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
+	WREG32(mmIH_CNTL, ih_cntl);
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+	adev->irq.ih.enabled = true;
+}
+
+/**
+ * cz_ih_disable_interrupts - Disable the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable the interrupt ring buffer (VI).
+ */
+static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+	u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
+	u32 ih_cntl = RREG32(mmIH_CNTL);
+
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
+	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0);
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+	WREG32(mmIH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(mmIH_RB_RPTR, 0);
+	WREG32(mmIH_RB_WPTR, 0);
+	adev->irq.ih.enabled = false;
+	adev->irq.ih.rptr = 0;
+}
+
+/**
+ * cz_ih_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it (VI).
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int cz_ih_irq_init(struct amdgpu_device *adev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+	u64 wptr_off;
+
+	/* disable irqs */
+	cz_ih_disable_interrupts(adev);
+
+	/* setup interrupt control */
+	WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+	interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
+	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
+	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
+	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
+	WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
+
+	/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+	WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+
+	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+	ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+
+	/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
+	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
+
+	/* set the writeback address whether it's enabled or not */
+	wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+	WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+	WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+
+	WREG32(mmIH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(mmIH_RB_RPTR, 0);
+	WREG32(mmIH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = RREG32(mmIH_CNTL);
+	ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0);
+
+	if (adev->irq.msi_enabled)
+		ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1);
+	WREG32(mmIH_CNTL, ih_cntl);
+
+	pci_set_master(adev->pdev);
+
+	/* enable interrupts */
+	cz_ih_enable_interrupts(adev);
+
+	return ret;
+}
+
+/**
+ * cz_ih_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw (VI).
+ */
+static void cz_ih_irq_disable(struct amdgpu_device *adev)
+{
+	cz_ih_disable_interrupts(adev);
+
+	/* Wait and acknowledge irq */
+	mdelay(1);
+}
+
+/**
+ * cz_ih_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer (VI).  Also check for
+ * ring buffer overflow and deal with it.
+ * Used by cz_irq_process (VI).
+ * Returns the value of the wptr.
+ */
+static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
+{
+	u32 wptr, tmp;
+
+	wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
+	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+		adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+		tmp = RREG32(mmIH_RB_CNTL);
+		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+		WREG32(mmIH_RB_CNTL, tmp);
+	}
+	return (wptr & adev->irq.ih.ptr_mask);
+}
+
+/**
+ * cz_ih_decode_iv - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position.
+ */
+static void cz_ih_decode_iv(struct amdgpu_device *adev,
+				 struct amdgpu_iv_entry *entry)
+{
+	/* wptr/rptr are in bytes! */
+	u32 ring_index = adev->irq.ih.rptr >> 2;
+	uint32_t dw[4];
+
+	dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+	dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+	entry->src_id = dw[0] & 0xff;
+	entry->src_data = dw[1] & 0xfffffff;
+	entry->ring_id = dw[2] & 0xff;
+	entry->vm_id = (dw[2] >> 8) & 0xff;
+	entry->pas_id = (dw[2] >> 16) & 0xffff;
+
+	/* wptr/rptr are in bytes! */
+	adev->irq.ih.rptr += 16;
+}
+
+/**
+ * cz_ih_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void cz_ih_set_rptr(struct amdgpu_device *adev)
+{
+	WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int cz_ih_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cz_ih_set_interrupt_funcs(adev);
+	return 0;
+}
+
+static int cz_ih_sw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+	if (r)
+		return r;
+
+	r = amdgpu_irq_init(adev);
+
+	return r;
+}
+
+static int cz_ih_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_irq_fini(adev);
+	amdgpu_ih_ring_fini(adev);
+
+	return 0;
+}
+
+static int cz_ih_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	r = cz_ih_irq_init(adev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int cz_ih_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	cz_ih_irq_disable(adev);
+
+	return 0;
+}
+
+static int cz_ih_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cz_ih_hw_fini(adev);
+}
+
+static int cz_ih_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return cz_ih_hw_init(adev);
+}
+
+static bool cz_ih_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(mmSRBM_STATUS);
+
+	if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+		return false;
+
+	return true;
+}
+
+static int cz_ih_wait_for_idle(void *handle)
+{
+	unsigned i;
+	u32 tmp;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		/* read SRBM_STATUS */
+		tmp = RREG32(mmSRBM_STATUS);
+		if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static void cz_ih_print_status(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dev_info(adev->dev, "CZ IH registers\n");
+	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
+		RREG32(mmSRBM_STATUS));
+	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
+		RREG32(mmSRBM_STATUS2));
+	dev_info(adev->dev, "  INTERRUPT_CNTL=0x%08X\n",
+		 RREG32(mmINTERRUPT_CNTL));
+	dev_info(adev->dev, "  INTERRUPT_CNTL2=0x%08X\n",
+		 RREG32(mmINTERRUPT_CNTL2));
+	dev_info(adev->dev, "  IH_CNTL=0x%08X\n",
+		 RREG32(mmIH_CNTL));
+	dev_info(adev->dev, "  IH_RB_CNTL=0x%08X\n",
+		 RREG32(mmIH_RB_CNTL));
+	dev_info(adev->dev, "  IH_RB_BASE=0x%08X\n",
+		 RREG32(mmIH_RB_BASE));
+	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_LO=0x%08X\n",
+		 RREG32(mmIH_RB_WPTR_ADDR_LO));
+	dev_info(adev->dev, "  IH_RB_WPTR_ADDR_HI=0x%08X\n",
+		 RREG32(mmIH_RB_WPTR_ADDR_HI));
+	dev_info(adev->dev, "  IH_RB_RPTR=0x%08X\n",
+		 RREG32(mmIH_RB_RPTR));
+	dev_info(adev->dev, "  IH_RB_WPTR=0x%08X\n",
+		 RREG32(mmIH_RB_WPTR));
+}
+
+static int cz_ih_soft_reset(void *handle)
+{
+	u32 srbm_soft_reset = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 tmp = RREG32(mmSRBM_STATUS);
+
+	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+						SOFT_RESET_IH, 1);
+
+	if (srbm_soft_reset) {
+		cz_ih_print_status((void *)adev);
+
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(mmSRBM_SOFT_RESET, tmp);
+		tmp = RREG32(mmSRBM_SOFT_RESET);
+
+		/* Wait a little for things to settle down */
+		udelay(50);
+
+		cz_ih_print_status((void *)adev);
+	}
+
+	return 0;
+}
+
+static int cz_ih_set_clockgating_state(void *handle,
+					  enum amd_clockgating_state state)
+{
+	/* TODO */
+	return 0;
+}
+
+static int cz_ih_set_powergating_state(void *handle,
+					  enum amd_powergating_state state)
+{
+	/* TODO */
+	return 0;
+}
+
+const struct amd_ip_funcs cz_ih_ip_funcs = {
+	.early_init = cz_ih_early_init,
+	.late_init = NULL,
+	.sw_init = cz_ih_sw_init,
+	.sw_fini = cz_ih_sw_fini,
+	.hw_init = cz_ih_hw_init,
+	.hw_fini = cz_ih_hw_fini,
+	.suspend = cz_ih_suspend,
+	.resume = cz_ih_resume,
+	.is_idle = cz_ih_is_idle,
+	.wait_for_idle = cz_ih_wait_for_idle,
+	.soft_reset = cz_ih_soft_reset,
+	.print_status = cz_ih_print_status,
+	.set_clockgating_state = cz_ih_set_clockgating_state,
+	.set_powergating_state = cz_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs cz_ih_funcs = {
+	.get_wptr = cz_ih_get_wptr,
+	.decode_iv = cz_ih_decode_iv,
+	.set_rptr = cz_ih_set_rptr
+};
+
+static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+	if (adev->irq.ih_funcs == NULL)
+		adev->irq.ih_funcs = &cz_ih_funcs;
+}
+

+ 29 - 0
drivers/gpu/drm/amd/amdgpu/cz_ih.h

@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __CZ_IH_H__
+#define __CZ_IH_H__
+
+extern const struct amd_ip_funcs cz_ih_ip_funcs;
+
+#endif /* __CZ_IH_H__ */

+ 185 - 0
drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h

@@ -0,0 +1,185 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CZ_PP_SMC_H
+#define CZ_PP_SMC_H
+
+#pragma pack(push, 1)
+
+/* Fan control algorithm:*/
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+    FAN_CONTROL_FUZZY,
+    FAN_CONTROL_TABLE
+};
+
+enum DPM_ARRAY {
+    DPM_ARRAY_HARD_MAX,
+    DPM_ARRAY_HARD_MIN,
+    DPM_ARRAY_SOFT_MAX,
+    DPM_ARRAY_SOFT_MIN
+};
+
+/*
+ * Return codes for driver to SMC communication.
+ * Leave these as #defines; enums might not be exactly 8 bits wide on the microcontroller.
+ */
+#define PPSMC_Result_OK             ((uint16_t)0x01)
+#define PPSMC_Result_NoMore         ((uint16_t)0x02)
+#define PPSMC_Result_NotNow         ((uint16_t)0x03)
+#define PPSMC_Result_Failed         ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      ((uint16_t)0xFD)
+
+#define PPSMC_isERROR(x)            ((uint16_t)0x80 & (x))
+
+/*
+ * Supported driver messages
+ */
+#define PPSMC_MSG_Test                        ((uint16_t) 0x1)
+#define PPSMC_MSG_GetFeatureStatus            ((uint16_t) 0x2)
+#define PPSMC_MSG_EnableAllSmuFeatures        ((uint16_t) 0x3)
+#define PPSMC_MSG_DisableAllSmuFeatures       ((uint16_t) 0x4)
+#define PPSMC_MSG_OptimizeBattery             ((uint16_t) 0x5)
+#define PPSMC_MSG_MaximizePerf                ((uint16_t) 0x6)
+#define PPSMC_MSG_UVDPowerOFF                 ((uint16_t) 0x7)
+#define PPSMC_MSG_UVDPowerON                  ((uint16_t) 0x8)
+#define PPSMC_MSG_VCEPowerOFF                 ((uint16_t) 0x9)
+#define PPSMC_MSG_VCEPowerON                  ((uint16_t) 0xA)
+#define PPSMC_MSG_ACPPowerOFF                 ((uint16_t) 0xB)
+#define PPSMC_MSG_ACPPowerON                  ((uint16_t) 0xC)
+#define PPSMC_MSG_SDMAPowerOFF                ((uint16_t) 0xD)
+#define PPSMC_MSG_SDMAPowerON                 ((uint16_t) 0xE)
+#define PPSMC_MSG_XDMAPowerOFF                ((uint16_t) 0xF)
+#define PPSMC_MSG_XDMAPowerON                 ((uint16_t) 0x10)
+#define PPSMC_MSG_SetMinDeepSleepSclk         ((uint16_t) 0x11)
+#define PPSMC_MSG_SetSclkSoftMin              ((uint16_t) 0x12)
+#define PPSMC_MSG_SetSclkSoftMax              ((uint16_t) 0x13)
+#define PPSMC_MSG_SetSclkHardMin              ((uint16_t) 0x14)
+#define PPSMC_MSG_SetSclkHardMax              ((uint16_t) 0x15)
+#define PPSMC_MSG_SetLclkSoftMin              ((uint16_t) 0x16)
+#define PPSMC_MSG_SetLclkSoftMax              ((uint16_t) 0x17)
+#define PPSMC_MSG_SetLclkHardMin              ((uint16_t) 0x18)
+#define PPSMC_MSG_SetLclkHardMax              ((uint16_t) 0x19)
+#define PPSMC_MSG_SetUvdSoftMin               ((uint16_t) 0x1A)
+#define PPSMC_MSG_SetUvdSoftMax               ((uint16_t) 0x1B)
+#define PPSMC_MSG_SetUvdHardMin               ((uint16_t) 0x1C)
+#define PPSMC_MSG_SetUvdHardMax               ((uint16_t) 0x1D)
+#define PPSMC_MSG_SetEclkSoftMin              ((uint16_t) 0x1E)
+#define PPSMC_MSG_SetEclkSoftMax              ((uint16_t) 0x1F)
+#define PPSMC_MSG_SetEclkHardMin              ((uint16_t) 0x20)
+#define PPSMC_MSG_SetEclkHardMax              ((uint16_t) 0x21)
+#define PPSMC_MSG_SetAclkSoftMin              ((uint16_t) 0x22)
+#define PPSMC_MSG_SetAclkSoftMax              ((uint16_t) 0x23)
+#define PPSMC_MSG_SetAclkHardMin              ((uint16_t) 0x24)
+#define PPSMC_MSG_SetAclkHardMax              ((uint16_t) 0x25)
+#define PPSMC_MSG_SetNclkSoftMin              ((uint16_t) 0x26)
+#define PPSMC_MSG_SetNclkSoftMax              ((uint16_t) 0x27)
+#define PPSMC_MSG_SetNclkHardMin              ((uint16_t) 0x28)
+#define PPSMC_MSG_SetNclkHardMax              ((uint16_t) 0x29)
+#define PPSMC_MSG_SetPstateSoftMin            ((uint16_t) 0x2A)
+#define PPSMC_MSG_SetPstateSoftMax            ((uint16_t) 0x2B)
+#define PPSMC_MSG_SetPstateHardMin            ((uint16_t) 0x2C)
+#define PPSMC_MSG_SetPstateHardMax            ((uint16_t) 0x2D)
+#define PPSMC_MSG_DisableLowMemoryPstate      ((uint16_t) 0x2E)
+#define PPSMC_MSG_EnableLowMemoryPstate       ((uint16_t) 0x2F)
+#define PPSMC_MSG_UcodeAddressLow             ((uint16_t) 0x30)
+#define PPSMC_MSG_UcodeAddressHigh            ((uint16_t) 0x31)
+#define PPSMC_MSG_UcodeLoadStatus             ((uint16_t) 0x32)
+#define PPSMC_MSG_DriverDramAddrHi            ((uint16_t) 0x33)
+#define PPSMC_MSG_DriverDramAddrLo            ((uint16_t) 0x34)
+#define PPSMC_MSG_CondExecDramAddrHi          ((uint16_t) 0x35)
+#define PPSMC_MSG_CondExecDramAddrLo          ((uint16_t) 0x36)
+#define PPSMC_MSG_LoadUcodes                  ((uint16_t) 0x37)
+#define PPSMC_MSG_DriverResetMode             ((uint16_t) 0x38)
+#define PPSMC_MSG_PowerStateNotify            ((uint16_t) 0x39)
+#define PPSMC_MSG_SetDisplayPhyConfig         ((uint16_t) 0x3A)
+#define PPSMC_MSG_GetMaxSclkLevel             ((uint16_t) 0x3B)
+#define PPSMC_MSG_GetMaxLclkLevel             ((uint16_t) 0x3C)
+#define PPSMC_MSG_GetMaxUvdLevel              ((uint16_t) 0x3D)
+#define PPSMC_MSG_GetMaxEclkLevel             ((uint16_t) 0x3E)
+#define PPSMC_MSG_GetMaxAclkLevel             ((uint16_t) 0x3F)
+#define PPSMC_MSG_GetMaxNclkLevel             ((uint16_t) 0x40)
+#define PPSMC_MSG_GetMaxPstate                ((uint16_t) 0x41)
+#define PPSMC_MSG_DramAddrHiVirtual           ((uint16_t) 0x42)
+#define PPSMC_MSG_DramAddrLoVirtual           ((uint16_t) 0x43)
+#define PPSMC_MSG_DramAddrHiPhysical          ((uint16_t) 0x44)
+#define PPSMC_MSG_DramAddrLoPhysical          ((uint16_t) 0x45)
+#define PPSMC_MSG_DramBufferSize              ((uint16_t) 0x46)
+#define PPSMC_MSG_SetMmPwrLogDramAddrHi       ((uint16_t) 0x47)
+#define PPSMC_MSG_SetMmPwrLogDramAddrLo       ((uint16_t) 0x48)
+#define PPSMC_MSG_SetClkTableAddrHi           ((uint16_t) 0x49)
+#define PPSMC_MSG_SetClkTableAddrLo           ((uint16_t) 0x4A)
+#define PPSMC_MSG_GetConservativePowerLimit   ((uint16_t) 0x4B)
+
+#define PPSMC_MSG_InitJobs                    ((uint16_t) 0x252)
+#define PPSMC_MSG_ExecuteJob                  ((uint16_t) 0x254)
+
+#define PPSMC_MSG_NBDPM_Enable                ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable               ((uint16_t) 0x141)
+
+#define PPSMC_MSG_DPM_FPS_Mode                ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode           ((uint16_t) 0x15e)
+
+#define PPSMC_MSG_PmStatusLogStart            ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample           ((uint16_t) 0x171)
+
+#define PPSMC_MSG_AllowLowSclkInterrupt       ((uint16_t) 0x184)
+#define PPSMC_MSG_MmPowerMonitorStart         ((uint16_t) 0x18F)
+#define PPSMC_MSG_MmPowerMonitorStop          ((uint16_t) 0x190)
+#define PPSMC_MSG_MmPowerMonitorRestart       ((uint16_t) 0x191)
+
+#define PPSMC_MSG_SetClockGateMask            ((uint16_t) 0x260)
+#define PPSMC_MSG_SetFpsThresholdLo           ((uint16_t) 0x264)
+#define PPSMC_MSG_SetFpsThresholdHi           ((uint16_t) 0x265)
+#define PPSMC_MSG_SetLowSclkIntrThreshold     ((uint16_t) 0x266)
+
+#define PPSMC_MSG_ClkTableXferToDram          ((uint16_t) 0x267)
+#define PPSMC_MSG_ClkTableXferToSmu           ((uint16_t) 0x268)
+#define PPSMC_MSG_GetAverageGraphicsActivity  ((uint16_t) 0x269)
+#define PPSMC_MSG_GetAverageGioActivity       ((uint16_t) 0x26A)
+#define PPSMC_MSG_SetLoggerBufferSize         ((uint16_t) 0x26B)
+#define PPSMC_MSG_SetLoggerAddressHigh        ((uint16_t) 0x26C)
+#define PPSMC_MSG_SetLoggerAddressLow         ((uint16_t) 0x26D)
+#define PPSMC_MSG_SetWatermarkFrequency       ((uint16_t) 0x26E)
+
+/* REMOVE LATER*/
+#define PPSMC_MSG_DPM_ForceState              ((uint16_t) 0x104)
+
+/* Feature Enable Masks*/
+#define NB_DPM_MASK             0x00000800
+#define VDDGFX_MASK             0x00800000
+#define VCE_DPM_MASK            0x00400000
+#define ACP_DPM_MASK            0x00040000
+#define UVD_DPM_MASK            0x00010000
+#define GFX_CU_PG_MASK          0x00004000
+#define SCLK_DPM_MASK           0x00080000
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+
+#endif
+
+#endif

Some files were not shown because too many files changed in this diff