
Merge branch 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Stop reprogramming the MC; the vbios already does this in asic_init
- Reduce internal gart to 256M (this does not affect the ttm GTT pool size)
- Initial support for huge pages
- Rework bo migration logic
- Lots of improvements for vega10
- Powerplay fixes
- Additional Raven enablement
- SR-IOV improvements
- Bug fixes
- Code cleanup

* 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux: (138 commits)
  drm/amdgpu: fix header on gfx9 clear state
  drm/amdgpu: reduce the time of reading VBIOS
  drm/amdgpu/virtual_dce: Remove the rmmod error message
  drm/amdgpu/gmc9: disable legacy vga features in gmc init
  drm/amdgpu/gmc8: disable legacy vga features in gmc init
  drm/amdgpu/gmc7: disable legacy vga features in gmc init
  drm/amdgpu/gmc6: disable legacy vga features in gmc init (v2)
  drm/radeon: Set depth on low mem to 16 bpp instead of 8 bpp
  drm/amdgpu: fix the incorrect scratch reg number on gfx v6
  drm/amdgpu: fix the incorrect scratch reg number on gfx v7
  drm/amdgpu: fix the incorrect scratch reg number on gfx v8
  drm/amdgpu: fix the incorrect scratch reg number on gfx v9
  drm/amd/powerplay: add support for 3DP 4K@120Hz on vega10.
  drm/amdgpu: enable huge page handling in the VM v5
  drm/amdgpu: increase fragmentation size for Vega10 v2
  drm/amdgpu: ttm_bind only when user needs gpu_addr in bo pin
  drm/amdgpu: correct clock info for SRIOV
  drm/amdgpu/gmc8: SRIOV need to program fb location
  drm/amdgpu: disable firmware loading for psp v10
  drm/amdgpu:fix gfx fence allocate size
  ...
Dave Airlie, commit dd24df6570
100 changed files with 4164 additions and 1740 deletions
  1. +1 -1  drivers/gpu/drm/amd/amdgpu/Makefile
  2. +24 -98  drivers/gpu/drm/amd/amdgpu/amdgpu.h
  3. +22 -10  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
  4. +1 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
  5. +126 -35  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
  6. +2 -4  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
  7. +1 -1  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
  8. +2 -15  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
  9. +8 -4  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
  10. +23 -1  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
  11. +73 -19  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
  12. +119 -62  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
  13. +17 -5  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
  14. +63 -16  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
  15. +77 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
  16. +7 -15  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
  17. +2 -1  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
  18. +22 -3  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
  19. +4 -3  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
  20. +4 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
  21. +2 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
  22. +11 -8  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
  23. +0 -12  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
  24. +41 -29  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
  25. +5 -1  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
  26. +31 -32  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
  27. +5 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
  28. +18 -8  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
  29. +40 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
  30. +26 -26  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
  31. +9 -9  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
  32. +276 -92  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
  33. +11 -5  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
  34. +9 -5  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
  35. +85 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
  36. +62 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
  37. +1 -0  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
  38. +193 -96  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
  39. +8 -2  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
  40. +1 -8  drivers/gpu/drm/amd/amdgpu/cik.c
  41. +60 -0  drivers/gpu/drm/amd/amdgpu/cik_sdma.c
  42. +21 -20  drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
  43. +2 -131  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
  44. +2 -76  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
  45. +2 -114  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
  46. +2 -78  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
  47. +40 -60  drivers/gpu/drm/amd/amdgpu/dce_virtual.c
  48. +1 -1  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
  49. +1 -1  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
  50. +16 -16  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
  51. +15 -13  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
  52. +10 -5  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
  53. +26 -45  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
  54. +27 -41  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
  55. +41 -40  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
  56. +80 -59  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
  57. +60 -48  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
  58. +26 -20  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
  59. +3 -1  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
  60. +1 -0  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
  61. +3 -1  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
  62. +14 -1  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
  63. +1 -0  drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
  64. +1 -1  drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
  65. +90 -6  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
  66. +5 -0  drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
  67. +4 -6  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
  68. +39 -3  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
  69. +33 -1  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
  70. +28 -0  drivers/gpu/drm/amd/amdgpu/si.c
  71. +1 -0  drivers/gpu/drm/amd/amdgpu/si_dpm.c
  72. +54 -12  drivers/gpu/drm/amd/amdgpu/soc15.c
  73. +7 -0  drivers/gpu/drm/amd/amdgpu/soc15_common.h
  74. +1 -1  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
  75. +63 -0  drivers/gpu/drm/amd/include/atomfirmware.h
  76. +6 -0  drivers/gpu/drm/amd/include/cgs_common.h
  77. +169 -70  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
  78. +8 -0  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
  79. +9 -0  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
  80. +239 -37  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
  81. +14 -1  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
  82. +1291 -0  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
  83. +16 -0  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
  84. +80 -8  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
  85. +5 -0  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
  86. +6 -0  drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
  87. +2 -0  drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
  88. +7 -6  drivers/gpu/drm/amd/powerplay/inc/smu9.h
  89. +4 -1  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
  90. +3 -0  drivers/gpu/drm/amd/powerplay/inc/smumgr.h
  91. +4 -0  drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
  92. +19 -0  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
  93. +1 -0  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
  94. +38 -146  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
  95. +0 -11  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
  96. +2 -2  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
  97. +15 -19  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
  98. +2 -10  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
  99. +5 -1  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
  100. +7 -1  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/Makefile

@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-	amdgpu_queue_mgr.o
+	amdgpu_queue_mgr.o amdgpu_vf_error.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \

+ 24 - 98
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -68,13 +68,16 @@
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_gart.h"
 
 /*
  * Modules parameters.
  */
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
-extern int amdgpu_gart_size;
+extern int amdgpu_vis_vram_limit;
+extern unsigned amdgpu_gart_size;
+extern int amdgpu_gtt_size;
 extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
@@ -104,6 +107,7 @@ extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
 extern unsigned amdgpu_cg_mask;
 extern unsigned amdgpu_pg_mask;
+extern unsigned amdgpu_sdma_phase_quantum;
 extern char *amdgpu_disable_cu;
 extern char *amdgpu_virtual_display;
 extern unsigned amdgpu_pp_feature_mask;
@@ -531,49 +535,6 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
-/*
- * GART structures, functions & helpers
- */
-struct amdgpu_mc;
-
-#define AMDGPU_GPU_PAGE_SIZE 4096
-#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
-#define AMDGPU_GPU_PAGE_SHIFT 12
-#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
-
-struct amdgpu_gart {
-	dma_addr_t			table_addr;
-	struct amdgpu_bo		*robj;
-	void				*ptr;
-	unsigned			num_gpu_pages;
-	unsigned			num_cpu_pages;
-	unsigned			table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	struct page			**pages;
-#endif
-	bool				ready;
-
-	/* Asic default pte flags */
-	uint64_t			gart_pte_flags;
-
-	const struct amdgpu_gart_funcs *gart_funcs;
-};
-
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-			int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-		     int pages, struct page **pagelist,
-		     dma_addr_t *dma_addr, uint64_t flags);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
-
 /*
  * VMHUB structures, functions & helpers
  */
@@ -598,22 +559,20 @@ struct amdgpu_mc {
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
-	u64			gtt_size;
-	u64			gtt_start;
-	u64			gtt_end;
+	u64			gart_size;
+	u64			gart_start;
+	u64			gart_end;
 	u64			vram_start;
 	u64			vram_end;
 	unsigned		vram_width;
 	u64			real_vram_size;
 	int			vram_mtrr;
-	u64                     gtt_base_align;
 	u64                     mc_mask;
 	const struct firmware   *fw;	/* MC firmware */
 	uint32_t                fw_version;
 	struct amdgpu_irq_src	vm_fault;
 	uint32_t		vram_type;
 	uint32_t                srbm_soft_reset;
-	struct amdgpu_mode_mc_save save;
 	bool			prt_warning;
 	uint64_t		stolen_size;
 	/* apertures */
@@ -1159,7 +1118,9 @@ struct amdgpu_cs_parser {
 	struct list_head		validated;
 	struct dma_fence		*fence;
 	uint64_t			bytes_moved_threshold;
+	uint64_t			bytes_moved_vis_threshold;
 	uint64_t			bytes_moved;
+	uint64_t			bytes_moved_vis;
 	struct amdgpu_bo_list_entry	*evictable;
 
 	/* user fence */
@@ -1231,7 +1192,9 @@ struct amdgpu_wb {
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
 int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
+int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);
+void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb);
 
 void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 
@@ -1557,6 +1520,10 @@ struct amdgpu_device {
 	spinlock_t gc_cac_idx_lock;
 	amdgpu_rreg_t			gc_cac_rreg;
 	amdgpu_wreg_t			gc_cac_wreg;
+	/* protects concurrent se_cac register access */
+	spinlock_t se_cac_idx_lock;
+	amdgpu_rreg_t			se_cac_rreg;
+	amdgpu_wreg_t			se_cac_wreg;
 	/* protects concurrent ENDPOINT (audio) register access */
 	spinlock_t audio_endpt_idx_lock;
 	amdgpu_block_rreg_t		audio_endpt_rreg;
@@ -1593,6 +1560,7 @@ struct amdgpu_device {
 		spinlock_t		lock;
 		s64			last_update_us;
 		s64			accum_us; /* accumulated microseconds */
+		s64			accum_us_vis; /* for visible VRAM */
 		u32			log2_max_MBps;
 	} mm_stats;
 
@@ -1687,6 +1655,8 @@ struct amdgpu_device {
 	bool has_hw_reset;
 	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];
 
+	/* record last mm index being written through WREG32*/
+	unsigned long last_mm_index;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1742,6 +1712,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
 #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
 #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
+#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
+#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
 #define WREG32_P(reg, val, mask)				\
@@ -1792,50 +1764,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
 
-/*
- * RING helpers.
- */
-static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
-{
-	if (ring->count_dw <= 0)
-		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-	ring->ring[ring->wptr++ & ring->buf_mask] = v;
-	ring->wptr &= ring->ptr_mask;
-	ring->count_dw--;
-}
-
-static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw)
-{
-	unsigned occupied, chunk1, chunk2;
-	void *dst;
-
-	if (unlikely(ring->count_dw < count_dw)) {
-		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-		return;
-	}
-
-	occupied = ring->wptr & ring->buf_mask;
-	dst = (void *)&ring->ring[occupied];
-	chunk1 = ring->buf_mask + 1 - occupied;
-	chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
-	chunk2 = count_dw - chunk1;
-	chunk1 <<= 2;
-	chunk2 <<= 2;
-
-	if (chunk1)
-		memcpy(dst, src, chunk1);
-
-	if (chunk2) {
-		src += chunk1;
-		dst = (void *)ring->ring;
-		memcpy(dst, src, chunk2);
-	}
-
-	ring->wptr += count_dw;
-	ring->wptr &= ring->ptr_mask;
-	ring->count_dw -= count_dw;
-}
-
 static inline struct amdgpu_sdma_instance *
 amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
@@ -1898,7 +1826,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
-#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
 #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
@@ -1911,8 +1838,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
 #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
-#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
-#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -1927,7 +1852,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+				  u64 num_vis_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
@@ -1943,7 +1869,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
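
The gtt_* to gart_* renames above are the heart of this series: the in-VRAM GART aperture is now kept distinct from TTM's GTT domain. A minimal sketch (not part of the patch) of what the renamed amdgpu_mc fields describe once amdgpu_vram_location() and amdgpu_gart_location() have run:

	/* illustrative helper; all values are byte addresses in the MC space */
	static void example_dump_apertures(struct amdgpu_device *adev)
	{
		struct amdgpu_mc *mc = &adev->mc;

		pr_info("VRAM: 0x%016llx - 0x%016llx (%lluM)\n",
			mc->vram_start, mc->vram_end, mc->mc_vram_size >> 20);
		pr_info("GART: 0x%016llx - 0x%016llx (%lluM)\n",
			mc->gart_start, mc->gart_end, mc->gart_size >> 20);
	}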

+ 22 - 10
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c

@@ -1686,7 +1686,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
 {
 	uint32_t bios_6_scratch;
 
-	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+	bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
 
 	if (lock) {
 		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
@@ -1696,15 +1696,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
 		bios_6_scratch |= ATOM_S6_ACC_MODE;
 	}
 
-	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+	WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
 }
 
 void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
 {
 	uint32_t bios_2_scratch, bios_6_scratch;
 
-	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
-	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+	adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0;
+
+	bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2);
+	bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
 
 	/* let the bios control the backlight */
 	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
@@ -1715,8 +1717,8 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
 	/* clear the vbios dpms state */
 	bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
 
-	WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
-	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+	WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch);
+	WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
 }
 
 void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
@@ -1724,7 +1726,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
-		adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
+		adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
 }
 
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
@@ -1738,20 +1740,30 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 	adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
 
 	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
-		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
+		WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
 }
 
 void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
 					      bool hung)
 {
-	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+	u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
 
 	if (hung)
 		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
 	else
 		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
 
-	WREG32(mmBIOS_SCRATCH_3, tmp);
+	WREG32(adev->bios_scratch_reg_offset + 3, tmp);
+}
+
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
+{
+	u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
+
+	if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK)
+		return false;
+	else
+		return true;
 }
 
 /* Atom needs data in little endian format
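
With the bios_scratch_reg_offset indirection above, the same scratch helpers serve both the legacy mmBIOS_SCRATCH_* block and the relocated SOC15 block, and amdgpu_atombios_scratch_need_asic_init() exposes the vbios ASIC_INIT_COMPLETE flag. A sketch of the intended consumer (the real call site is in the amdgpu_device.c hunk further down):

	/* on CIK and newer, trust the vbios scratch flag instead of MEM_SIZE */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);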

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h

@@ -200,6 +200,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
 void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
 					      bool hung);
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
 int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,

+ 126 - 35
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c

@@ -66,41 +66,6 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
 	}
 }
 
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev)
-{
-	int i;
-
-	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
-		adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
-{
-	int i;
-
-	/*
-	 * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
-	 * execute ASIC_Init posting via driver
-	 */
-	adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
-	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
-		WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
-						  bool hung)
-{
-	u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
-
-	if (hung)
-		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-	else
-		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
-	WREG32(adev->bios_scratch_reg_offset + 3, tmp);
-}
-
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 {
 	struct atom_context *ctx = adev->mode_info.atom_context;
@@ -130,3 +95,129 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 	ctx->scratch_size_bytes = usage_bytes;
 	return 0;
 }
+
+union igp_info {
+	struct atom_integrated_system_info_v1_11 v11;
+};
+
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+						integratedsysteminfo);
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	u8 frev, crev;
+
+	/* get any igp specific overrides */
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 11:
+			return igp_info->v11.umachannelnumber * 64;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+union firmware_info {
+	struct atom_firmware_info_v3_1 v31;
+};
+
+union smu_info {
+	struct atom_smu_info_v3_1 v31;
+};
+
+union umc_info {
+	struct atom_umc_info_v3_1 v31;
+};
+
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	struct amdgpu_pll *spll = &adev->clock.spll;
+	struct amdgpu_pll *mpll = &adev->clock.mpll;
+	uint8_t frev, crev;
+	uint16_t data_offset;
+	int ret = -EINVAL, index;
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    firmwareinfo);
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		union firmware_info *firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+
+		adev->clock.default_sclk =
+			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
+		adev->clock.default_mclk =
+			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
+
+		adev->pm.current_sclk = adev->clock.default_sclk;
+		adev->pm.current_mclk = adev->clock.default_mclk;
+
+		/* not technically a clock, but... */
+		adev->mode_info.firmware_flags =
+			le32_to_cpu(firmware_info->v31.firmware_capability);
+
+		ret = 0;
+	}
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    smu_info);
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		union smu_info *smu_info =
+			(union smu_info *)(mode_info->atom_context->bios +
+					   data_offset);
+
+		/* system clock */
+		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
+
+		spll->reference_div = 0;
+		spll->min_post_div = 1;
+		spll->max_post_div = 1;
+		spll->min_ref_div = 2;
+		spll->max_ref_div = 0xff;
+		spll->min_feedback_div = 4;
+		spll->max_feedback_div = 0xff;
+		spll->best_vco = 0;
+
+		ret = 0;
+	}
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    umc_info);
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		union umc_info *umc_info =
+			(union umc_info *)(mode_info->atom_context->bios +
+					   data_offset);
+
+		/* memory clock */
+		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);
+
+		mpll->reference_div = 0;
+		mpll->min_post_div = 1;
+		mpll->max_post_div = 1;
+		mpll->min_ref_div = 2;
+		mpll->max_ref_div = 0xff;
+		mpll->min_feedback_div = 4;
+		mpll->max_feedback_div = 0xff;
+		mpll->best_vco = 0;
+
+		ret = 0;
+	}
+
+	return ret;
+}
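
A hedged sketch of how an atomfirmware-era (SOC15+) init path would consume the two new queries; the surrounding error handling is illustrative, not from the patch:

	if (adev->is_atom_fw) {
		int width = amdgpu_atomfirmware_get_vram_width(adev);

		/* 0 means no integrated system info table was found */
		if (width)
			adev->mc.vram_width = width;

		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r)
			return r; /* none of firmwareinfo/smu_info/umc_info matched */
	}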

+ 2 - 4
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h

@@ -26,10 +26,8 @@
 
 bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
-						  bool hung);
 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 
 #endif

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c

@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 	for (i = 0; i < n; i++) {
 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
-				       false);
+				       false, false);
 		if (r)
 			goto exit_do_move;
 		r = dma_fence_wait(fence, false);

+ 2 - 15
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c

@@ -86,19 +86,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
 	return false;
 }
 
-static bool is_atom_fw(uint8_t *bios)
-{
-	uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8);
-	uint8_t frev = bios[bios_header_start + 2];
-	uint8_t crev = bios[bios_header_start + 3];
-
-	if ((frev < 3) ||
-	    ((frev == 3) && (crev < 3)))
-		return false;
-
-	return true;
-}
-
 /* If you boot an IGP board with a discrete card as the primary,
  * the IGP rom is not accessible via the rom bar as the IGP rom is
  * part of the system bios.  On boot, the system bios puts a
@@ -117,7 +104,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 
 	adev->bios = NULL;
 	vram_base = pci_resource_start(adev->pdev, 0);
-	bios = ioremap(vram_base, size);
+	bios = ioremap_wc(vram_base, size);
 	if (!bios) {
 		return false;
 	}
@@ -455,6 +442,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
 	return false;
 
 success:
-	adev->is_atom_fw = is_atom_fw(adev->bios);
+	adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false;
 	return true;
 }

+ 8 - 4
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -83,7 +83,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
 	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
 	mutex_unlock(&fpriv->bo_list_lock);
 	if (r < 0) {
-		kfree(list);
+		amdgpu_bo_list_free(list);
 		return r;
 	}
 	*id = r;
@@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
 	result = idr_find(&fpriv->bo_list_handles, id);
 
 	if (result) {
-		if (kref_get_unless_zero(&result->refcount))
+		if (kref_get_unless_zero(&result->refcount)) {
+			rcu_read_unlock();
 			mutex_lock(&result->lock);
-		else
+		} else {
+			rcu_read_unlock();
 			result = NULL;
+		}
+	} else {
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();
 
 	return result;
 }
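
The rewritten lookup fixes a sleep-under-RCU bug: mutex_lock() may sleep, so the RCU read lock has to be dropped first, which is only safe once kref_get_unless_zero() has pinned the object. The pattern in isolation (sketch, not from the patch):

	rcu_read_lock();
	obj = idr_find(&idr, id);
	if (obj && kref_get_unless_zero(&obj->refcount)) {
		rcu_read_unlock();	/* obj is pinned; RCU no longer needed */
		mutex_lock(&obj->lock);	/* sleeping is legal here */
	} else {
		rcu_read_unlock();
		obj = NULL;
	}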

+ 23 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -240,6 +240,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
 		return RREG32_DIDT(index);
 	case CGS_IND_REG_GC_CAC:
 		return RREG32_GC_CAC(index);
+	case CGS_IND_REG_SE_CAC:
+		return RREG32_SE_CAC(index);
 	case CGS_IND_REG__AUDIO_ENDPT:
 		DRM_ERROR("audio endpt register access not implemented.\n");
 		return 0;
@@ -266,6 +268,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 		return WREG32_DIDT(index, value);
 	case CGS_IND_REG_GC_CAC:
 		return WREG32_GC_CAC(index, value);
+	case CGS_IND_REG_SE_CAC:
+		return WREG32_SE_CAC(index, value);
 	case CGS_IND_REG__AUDIO_ENDPT:
 		DRM_ERROR("audio endpt register access not implemented.\n");
 		return;
@@ -610,6 +614,17 @@ static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
 	return 0;
 }
 
+static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
+					bool lock)
+{
+	CGS_FUNC_ADEV;
+
+	if (lock)
+		mutex_lock(&adev->grbm_idx_mutex);
+	else
+		mutex_unlock(&adev->grbm_idx_mutex);
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -719,7 +734,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
 				break;
 			case CHIP_VEGA10:
-				strcpy(fw_name, "amdgpu/vega10_smc.bin");
+				if ((adev->pdev->device == 0x687f) &&
+					((adev->pdev->revision == 0xc0) ||
+					(adev->pdev->revision == 0xc1) ||
+					(adev->pdev->revision == 0xc3)))
+					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/vega10_smc.bin");
 				break;
 			default:
 				DRM_ERROR("SMC firmware not supported\n");
@@ -1117,6 +1138,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	.query_system_info = amdgpu_cgs_query_system_info,
 	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
 	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
+	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
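
The new lock_grbm_idx callback lets powerplay serialize GRBM_GFX_INDEX banking against the rest of the driver. Assuming the matching cgs_lock_grbm_idx() wrapper macro lands in cgs_common.h (also touched by this series), a vega10 powertune routine would bracket its per-SE register accesses roughly like:

	cgs_lock_grbm_idx(hwmgr->device, true);
	/* ... select an SE via GRBM_GFX_INDEX, access SE_CAC registers ... */
	cgs_lock_grbm_idx(hwmgr->device, false);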

+ 73 - 19
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
  * ticks. The accumulated microseconds (us) are converted to bytes and
  * returned.
  */
-static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
+static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+					      u64 *max_bytes,
+					      u64 *max_vis_bytes)
 {
 	s64 time_us, increment_us;
-	u64 max_bytes;
 	u64 free_vram, total_vram, used_vram;
 
 	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
@@ -238,8 +239,11 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 	 */
 	const s64 us_upper_bound = 200000;
 
-	if (!adev->mm_stats.log2_max_MBps)
-		return 0;
+	if (!adev->mm_stats.log2_max_MBps) {
+		*max_bytes = 0;
+		*max_vis_bytes = 0;
+		return;
+	}
 
 	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
 	used_vram = atomic64_read(&adev->vram_usage);
@@ -280,23 +284,45 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 	}
 
-	/* This returns 0 if the driver is in debt to disallow (optional)
+	/* This is set to 0 if the driver is in debt to disallow (optional)
 	 * buffer moves.
 	 */
-	max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+	/* Do the same for visible VRAM if half of it is free */
+	if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+		u64 total_vis_vram = adev->mc.visible_vram_size;
+		u64 used_vis_vram = atomic64_read(&adev->vram_vis_usage);
+
+		if (used_vis_vram < total_vis_vram) {
+			u64 free_vis_vram = total_vis_vram - used_vis_vram;
+			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
+							  increment_us, us_upper_bound);
+
+			if (free_vis_vram >= total_vis_vram / 2)
+				adev->mm_stats.accum_us_vis =
+					max(bytes_to_us(adev, free_vis_vram / 2),
+					    adev->mm_stats.accum_us_vis);
+		}
+
+		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
+	} else {
+		*max_vis_bytes = 0;
+	}
 
 	spin_unlock(&adev->mm_stats.lock);
-	return max_bytes;
 }
 
 /* Report how many bytes have really been moved for the last command
  * submission. This can result in a debt that can stop buffer migrations
  * temporarily.
  */
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+				  u64 num_vis_bytes)
 {
 	spin_lock(&adev->mm_stats.lock);
 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 	spin_unlock(&adev->mm_stats.lock);
 }
 
@@ -304,7 +330,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	u64 initial_bytes_moved;
+	u64 initial_bytes_moved, bytes_moved;
 	uint32_t domain;
 	int r;
 
@@ -314,17 +340,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	/* Don't move this buffer if we have depleted our allowance
 	 * to move it. Don't move anything if the threshold is zero.
 	 */
-	if (p->bytes_moved < p->bytes_moved_threshold)
-		domain = bo->prefered_domains;
-	else
+	if (p->bytes_moved < p->bytes_moved_threshold) {
+		if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
+			 * visible VRAM if we've depleted our allowance to do
+			 * that.
+			 */
+			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
+				domain = bo->prefered_domains;
+			else
+				domain = bo->allowed_domains;
+		} else {
+			domain = bo->prefered_domains;
+		}
+	} else {
 		domain = bo->allowed_domains;
+	}
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
-		initial_bytes_moved;
+	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+		      initial_bytes_moved;
+	p->bytes_moved += bytes_moved;
+	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+		p->bytes_moved_vis += bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
@@ -350,7 +394,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		struct amdgpu_bo_list_entry *candidate = p->evictable;
 		struct amdgpu_bo *bo = candidate->robj;
 		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-		u64 initial_bytes_moved;
+		u64 initial_bytes_moved, bytes_moved;
+		bool update_bytes_moved_vis;
 		uint32_t other;
 
 		/* If we reached our current BO we can forget it */
@@ -370,10 +415,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
 		/* Good we can try to move this BO somewhere else */
 		amdgpu_ttm_placement_from_domain(bo, other);
+		update_bytes_moved_vis =
+			adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
 		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-		p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
+		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 			initial_bytes_moved;
+		p->bytes_moved += bytes_moved;
+		if (update_bytes_moved_vis)
+			p->bytes_moved_vis += bytes_moved;
 
 		if (unlikely(r))
 			break;
@@ -554,8 +606,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_splice(&need_pages, &p->validated);
 	}
 
-	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
+					  &p->bytes_moved_vis_threshold);
 	p->bytes_moved = 0;
+	p->bytes_moved_vis = 0;
 	p->evictable = list_last_entry(&p->validated,
 				       struct amdgpu_bo_list_entry,
 				       tv.head);
@@ -579,8 +633,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		goto error_validate;
 	}
 
-	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
-
+	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+				     p->bytes_moved_vis);
 	fpriv->vm.last_eviction_counter =
 		atomic64_read(&p->adev->num_evictions);
 

+ 119 - 62
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -53,6 +53,9 @@
 #include "bif/bif_4_1_d.h"
 #include <linux/pci.h>
 #include <linux/firmware.h>
+#include "amdgpu_vf_error.h"
+
+#include "amdgpu_amdkfd.h"
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -128,6 +131,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
+
 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
 		BUG_ON(in_interrupt());
 		return amdgpu_virt_kiq_wreg(adev, reg, v);
@@ -143,6 +150,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
 }
 
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
@@ -157,6 +168,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
 
 	if ((reg * 4) < adev->rio_mem_size)
 		iowrite32(v, adev->rio_mem + (reg * 4));
@@ -164,6 +178,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
 }
 
 /**
@@ -584,6 +602,21 @@ int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
 	}
 }
 
+int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb)
+{
+	int i = 0;
+	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
+				adev->wb.num_wb, 0, 8, 63, 0);
+	if ((offset + 7) < adev->wb.num_wb) {
+		for (i = 0; i < 8; i++)
+			__set_bit(offset + i, adev->wb.used);
+		*wb = offset;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
 /**
  * amdgpu_wb_free - Free a wb entry
  *
@@ -614,6 +647,23 @@ void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
 	}
 }
 
+/**
+ * amdgpu_wb_free_256bit - Free a wb entry
+ *
+ * @adev: amdgpu_device pointer
+ * @wb: wb index
+ *
+ * Free a wb slot allocated for use by the driver (all asics)
+ */
+void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb)
+{
+	int i = 0;
+
+	if ((wb + 7) < adev->wb.num_wb)
+		for (i = 0; i < 8; i++)
+			__clear_bit(wb + i, adev->wb.used);
+}
+
 /**
  * amdgpu_vram_location - try to find VRAM location
  * @adev: amdgpu device structure holding all necessary informations
@@ -665,7 +715,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
 }
 
 /**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_gart_location - try to find GTT location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
  *
@@ -676,28 +726,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
  *
  * FIXME: when reducing GTT size align new size on power of 2.
  */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 {
 	u64 size_af, size_bf;
 
-	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
-	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	size_af = adev->mc.mc_mask - mc->vram_end;
+	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
-		if (mc->gtt_size > size_bf) {
+		if (mc->gart_size > size_bf) {
 			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_bf;
+			mc->gart_size = size_bf;
 		}
-		mc->gtt_start = 0;
+		mc->gart_start = 0;
 	} else {
-		if (mc->gtt_size > size_af) {
+		if (mc->gart_size > size_af) {
 			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_af;
+			mc->gart_size = size_af;
 		}
-		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+		mc->gart_start = mc->vram_end + 1;
 	}
-	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	mc->gart_end = mc->gart_start + mc->gart_size - 1;
 	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
 /*
@@ -720,7 +770,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
 		adev->has_hw_reset = false;
 		return true;
 	}
-	/* then check MEM_SIZE, in case the crtcs are off */
+
+	/* bios scratch used on CIK+ */
+	if (adev->asic_type >= CHIP_BONAIRE)
+		return amdgpu_atombios_scratch_need_asic_init(adev);
+
+	/* check MEM_SIZE for older asics */
 	reg = amdgpu_asic_get_config_memsize(adev);
 
 	if ((reg != 0) && (reg != 0xffffffff))
@@ -1031,19 +1086,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
-/**
- * amdgpu_check_pot_argument - check that argument is a power of two
- *
- * @arg: value to check
- *
- * Validates that a certain argument is a power of two (all asics).
- * Returns true if argument is valid.
- */
-static bool amdgpu_check_pot_argument(int arg)
-{
-	return (arg & (arg - 1)) == 0;
-}
-
 static void amdgpu_check_block_size(struct amdgpu_device *adev)
 {
 	/* defines number of bits in page table versus page directory,
@@ -1077,7 +1119,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
 	if (amdgpu_vm_size == -1)
 		return;
 
-	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+	if (!is_power_of_2(amdgpu_vm_size)) {
 		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
 			 amdgpu_vm_size);
 		goto def_value;
@@ -1118,19 +1160,24 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 			 amdgpu_sched_jobs);
 		amdgpu_sched_jobs = 4;
-	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
+	} else if (!is_power_of_2(amdgpu_sched_jobs)){
 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
 			 amdgpu_sched_jobs);
 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 	}
 
-	if (amdgpu_gart_size != -1) {
+	if (amdgpu_gart_size < 32) {
+		/* gart size must be greater or equal to 32M */
+		dev_warn(adev->dev, "gart size (%d) too small\n",
+			 amdgpu_gart_size);
+		amdgpu_gart_size = 32;
+	}
+
+	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
 		/* gtt size must be greater or equal to 32M */
-		if (amdgpu_gart_size < 32) {
-			dev_warn(adev->dev, "gart size (%d) too small\n",
-				 amdgpu_gart_size);
-			amdgpu_gart_size = -1;
-		}
+		dev_warn(adev->dev, "gtt size (%d) too small\n",
+				 amdgpu_gtt_size);
+		amdgpu_gtt_size = -1;
 	}
 
 	amdgpu_check_vm_size(adev);
@@ -1138,7 +1185,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 	amdgpu_check_block_size(adev);
 
 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
-	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+	    !is_power_of_2(amdgpu_vram_page_split))) {
 		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
 			 amdgpu_vram_page_split);
 		amdgpu_vram_page_split = 1024;
@@ -2019,7 +2066,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gtt_size = 512 * 1024 * 1024;
+	adev->mc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
 	adev->mman.buffer_funcs = NULL;
@@ -2068,6 +2115,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	spin_lock_init(&adev->uvd_ctx_idx_lock);
 	spin_lock_init(&adev->didt_idx_lock);
 	spin_lock_init(&adev->gc_cac_idx_lock);
+	spin_lock_init(&adev->se_cac_idx_lock);
 	spin_lock_init(&adev->audio_endpt_idx_lock);
 	spin_lock_init(&adev->mm_stats.lock);
 
@@ -2143,6 +2191,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_atombios_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
 		goto failed;
 	}
 
@@ -2153,6 +2202,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (amdgpu_vpost_needed(adev)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "no vBIOS found\n");
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
 			r = -EINVAL;
 			goto failed;
 		}
@@ -2160,18 +2210,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
 		if (r) {
 			dev_err(adev->dev, "gpu post error!\n");
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
 			goto failed;
 		}
 	} else {
 		DRM_INFO("GPU post is not needed\n");
 	}
 
-	if (!adev->is_atom_fw) {
+	if (adev->is_atom_fw) {
+		/* Initialize clocks */
+		r = amdgpu_atomfirmware_get_clock_info(adev);
+		if (r) {
+			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+			goto failed;
+		}
+	} else {
 		/* Initialize clocks */
 		r = amdgpu_atombios_get_clock_info(adev);
 		if (r) {
 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-			return r;
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+			goto failed;
 		}
 		/* init i2c buses */
 		amdgpu_atombios_i2c_init(adev);
@@ -2181,6 +2241,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_fence_driver_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
 		goto failed;
 	}
 
@@ -2190,6 +2251,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
 		amdgpu_fini(adev);
 		goto failed;
 	}
@@ -2209,6 +2271,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
 		goto failed;
 	}
 
@@ -2253,12 +2316,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_late_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_late_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
 		goto failed;
 	}
 
 	return 0;
 
 failed:
+	amdgpu_vf_error_trans_all(adev);
 	if (runtime)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	return r;
@@ -2351,6 +2416,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	}
 	drm_modeset_unlock_all(dev);
 
+	amdgpu_amdkfd_suspend(adev);
+
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -2392,10 +2459,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	 */
 	amdgpu_bo_evict_vram(adev);
 
-	if (adev->is_atom_fw)
-		amdgpu_atomfirmware_scratch_regs_save(adev);
-	else
-		amdgpu_atombios_scratch_regs_save(adev);
+	amdgpu_atombios_scratch_regs_save(adev);
 	pci_save_state(dev->pdev);
 	if (suspend) {
 		/* Shut down the device */
@@ -2444,10 +2508,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 		if (r)
 			goto unlock;
 	}
-	if (adev->is_atom_fw)
-		amdgpu_atomfirmware_scratch_regs_restore(adev);
-	else
-		amdgpu_atombios_scratch_regs_restore(adev);
+	amdgpu_atombios_scratch_regs_restore(adev);
 
 	/* post card */
 	if (amdgpu_need_post(adev)) {
@@ -2490,6 +2551,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 			}
 		}
 	}
+	r = amdgpu_amdkfd_resume(adev);
+	if (r)
+		return r;
 
 	/* blat the mode back in */
 	if (fbcon) {
@@ -2860,21 +2924,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 		r = amdgpu_suspend(adev);
 
 retry:
-		/* Disable fb access */
-		if (adev->mode_info.num_crtc) {
-			struct amdgpu_mode_mc_save save;
-			amdgpu_display_stop_mc_access(adev, &save);
-			amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
-		}
-		if (adev->is_atom_fw)
-			amdgpu_atomfirmware_scratch_regs_save(adev);
-		else
-			amdgpu_atombios_scratch_regs_save(adev);
+		amdgpu_atombios_scratch_regs_save(adev);
 		r = amdgpu_asic_reset(adev);
-		if (adev->is_atom_fw)
-			amdgpu_atomfirmware_scratch_regs_restore(adev);
-		else
-			amdgpu_atombios_scratch_regs_restore(adev);
+		amdgpu_atombios_scratch_regs_restore(adev);
 		/* post card */
 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 
@@ -2952,6 +3004,7 @@ out:
 		}
 	} else {
 		dev_err(adev->dev, "asic resume failed (%d).\n", r);
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			if (adev->rings[i] && adev->rings[i]->sched.thread) {
 				kthread_unpark(adev->rings[i]->sched.thread);
@@ -2962,12 +3015,16 @@ out:
 	drm_helper_resume_force_mode(adev->ddev);
 
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-	if (r)
+	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
-	else
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+	}
+	else {
 		dev_info(adev->dev, "GPU reset successed!\n");
+	}
 
+	amdgpu_vf_error_trans_all(adev);
 	return r;
 }
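
A sketch of how a ring might use the new 256-bit writeback allocator, which hands out eight consecutive 32-bit slots on a 64-slot boundary (the call site is hypothetical; adev->wb.gpu_addr is assumed to be the existing writeback BO base):

	u32 wb;
	int r = amdgpu_wb_get_256Bit(adev, &wb);

	if (!r) {
		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
		/* ... point e.g. an SDMA wptr poll address at gpu_addr ... */
		amdgpu_wb_free_256bit(adev, wb);
	}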
 

+ 17 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -74,7 +74,9 @@
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
-int amdgpu_gart_size = -1; /* auto */
+int amdgpu_vis_vram_limit = 0;
+unsigned amdgpu_gart_size = 256;
+int amdgpu_gtt_size = -1; /* auto */
 int amdgpu_moverate = -1; /* auto */
 int amdgpu_benchmarking = 0;
 int amdgpu_testing = 0;
@@ -106,6 +108,7 @@ unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
 unsigned amdgpu_cg_mask = 0xffffffff;
 unsigned amdgpu_pg_mask = 0xffffffff;
+unsigned amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
 unsigned amdgpu_pp_feature_mask = 0xffffffff;
@@ -120,8 +123,14 @@ int amdgpu_lbpw = -1;
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
 
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
-module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
+module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)");
+module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+
+MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
+module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
 
 MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
 module_param_named(moverate, amdgpu_moverate, int, 0600);
@@ -186,7 +195,7 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
 module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
 
-MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
 module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
@@ -199,7 +208,7 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
 
 MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
 module_param_named(no_evict, amdgpu_no_evict, int, 0444);
@@ -219,6 +228,9 @@ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
 MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
 module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 
+MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
+module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 

+ 63 - 16
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c

@@ -55,6 +55,19 @@
 /*
  * Common GART table functions.
  */
+
+/**
+ * amdgpu_gart_set_defaults - set the default gart_size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the default gart_size based on parameters and available VRAM.
+ */
+void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
+{
+	adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20;
+}
+
 /**
  * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
  *
@@ -262,6 +275,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	return 0;
 }
 
+/**
+ * amdgpu_gart_map - map dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Map the dma_addresses into GART entries (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst)
+{
+	uint64_t page_base;
+	unsigned i, j, t;
+
+	if (!adev->gart.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART !\n");
+		return -EINVAL;
+	}
+
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+	for (i = 0; i < pages; i++) {
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
+		}
+	}
+	return 0;
+}
+
 /**
  * amdgpu_gart_bind - bind pages into the gart page table
  *
@@ -279,31 +327,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		     uint64_t flags)
 {
-	unsigned t;
-	unsigned p;
-	uint64_t page_base;
-	int i, j;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+	unsigned i, t, p;
+#endif
+	int r;
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to bind memory to uninitialized GART!\n");
 		return -EINVAL;
 	}
 
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-
-	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+	for (i = 0; i < pages; i++, p++)
 		adev->gart.pages[p] = pagelist[i];
 #endif
-		if (adev->gart.ptr) {
-			page_base = dma_addr[i];
-			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
-				page_base += AMDGPU_GPU_PAGE_SIZE;
-			}
-		}
+
+	if (adev->gart.ptr) {
+		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+			    adev->gart.ptr);
+		if (r)
+			return r;
 	}
+
 	mb();
 	amdgpu_gart_flush_gpu_tlb(adev, 0);
 	return 0;
@@ -333,8 +380,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
 	if (r)
 		return r;
 	/* Compute table size */
-	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
-	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+	adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
+	adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
 	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
 		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
 

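The new amdgpu_gart_map() writes one PTE per 4 KiB GPU page, so a single system page yields PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE entries, starting at slot offset / AMDGPU_GPU_PAGE_SIZE. A stand-alone sketch of that indexing, with hypothetical constants mirroring amdgpu_gart.h (not driver code):

	/* Sketch: GART slot arithmetic as in amdgpu_gart_map(). */
	#include <stdio.h>
	#include <stdint.h>

	#define GPU_PAGE_SIZE 4096u	/* AMDGPU_GPU_PAGE_SIZE */
	#define SYS_PAGE_SIZE 4096u	/* PAGE_SIZE on most configurations */

	int main(void)
	{
		uint64_t offset = 0x10000;		/* byte offset into the GART aperture */
		int pages = 4;				/* system pages to map */
		unsigned t = offset / GPU_PAGE_SIZE;	/* first PTE slot: 16 */
		unsigned per_page = SYS_PAGE_SIZE / GPU_PAGE_SIZE;

		printf("first slot %u, PTEs written %u\n", t, pages * per_page);
		return 0;
	}
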
+ 77 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h

@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_GART_H__
+#define __AMDGPU_GART_H__
+
+#include <linux/types.h>
+
+/*
+ * GART structures, functions & helpers
+ */
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_gart_funcs;
+
+#define AMDGPU_GPU_PAGE_SIZE 4096
+#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
+#define AMDGPU_GPU_PAGE_SHIFT 12
+#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
+struct amdgpu_gart {
+	dma_addr_t			table_addr;
+	struct amdgpu_bo		*robj;
+	void				*ptr;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+	struct page			**pages;
+#endif
+	bool				ready;
+
+	/* Asic default pte flags */
+	uint64_t			gart_pte_flags;
+
+	const struct amdgpu_gart_funcs *gart_funcs;
+};
+
+void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_gart_init(struct amdgpu_device *adev);
+void amdgpu_gart_fini(struct amdgpu_device *adev);
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+		       int pages);
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst);
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr, uint64_t flags);
+
+#endif

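AMDGPU_GPU_PAGE_ALIGN() rounds a size up to the next 4 KiB boundary by adding the page mask and clearing the low bits. A quick self-contained check of the arithmetic (a sketch reusing the definitions above):

	#include <assert.h>

	#define AMDGPU_GPU_PAGE_SIZE 4096
	#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
	#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

	int main(void)
	{
		assert(AMDGPU_GPU_PAGE_ALIGN(1) == 4096);	/* partial page rounds up */
		assert(AMDGPU_GPU_PAGE_ALIGN(4096) == 4096);	/* aligned size unchanged */
		assert(AMDGPU_GPU_PAGE_ALIGN(4097) == 8192);
		return 0;
	}
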
+ 7 - 15
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 				struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *robj;
-	unsigned long max_size;
 	int r;
 
 	*obj = NULL;
@@ -58,17 +57,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 		alignment = PAGE_SIZE;
 	}
 
-	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-		/* Maximum bo size is the unpinned gtt size since we use the gtt to
-		 * handle vram to system pool migrations.
-		 */
-		max_size = adev->mc.gtt_size - adev->gart_pin_size;
-		if (size > max_size) {
-			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
-				  size >> 20, max_size >> 20);
-			return -ENOMEM;
-		}
-	}
 retry:
 	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
 			     flags, NULL, NULL, &robj);
@@ -784,6 +772,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 	unsigned domain;
 	const char *placement;
 	unsigned pin_count;
+	uint64_t offset;
 
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	switch (domain) {
@@ -798,9 +787,12 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
 		placement = " CPU";
 		break;
 	}
-	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
-		   id, amdgpu_bo_size(bo), placement,
-		   amdgpu_bo_gpu_offset(bo));
+	seq_printf(m, "\t0x%08x: %12ld byte %s",
+		   id, amdgpu_bo_size(bo), placement);
+
+	offset = ACCESS_ONCE(bo->tbo.mem.start);
+	if (offset != AMDGPU_BO_INVALID_OFFSET)
+		seq_printf(m, " @ 0x%010Lx", offset);
 
 	pin_count = ACCESS_ONCE(bo->pin_count);
 	if (pin_count)

+ 2 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

@@ -125,7 +125,8 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 		if (mec >= adev->gfx.mec.num_mec)
 			break;
 
-		if (adev->gfx.mec.num_mec > 1) {
+		/* FIXME: spreading the queues across pipes causes perf regressions */
+		if (0) {
 			/* policy: amdgpu owns the first two queues of the first MEC */
 			if (mec == 0 && queue < 2)
 				set_bit(i, adev->gfx.mec.queue_bitmap);

+ 22 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

@@ -42,13 +42,17 @@ struct amdgpu_gtt_mgr {
 static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 			       unsigned long p_size)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr;
+	uint64_t start, size;
 
 	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
 	if (!mgr)
 		return -ENOMEM;
 
-	drm_mm_init(&mgr->mm, 0, p_size);
+	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+	size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
+	drm_mm_init(&mgr->mm, start, size);
 	spin_lock_init(&mgr->lock);
 	mgr->available = p_size;
 	man->priv = mgr;
@@ -80,6 +84,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 	return 0;
 }
 
+/**
+ * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ *
+ * @mem: the mem object to check
+ *
+ * Check if a mem object has already address space allocated.
+ */
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+{
+	struct drm_mm_node *node = mem->mm_node;
+
+	return (node->start != AMDGPU_BO_INVALID_OFFSET);
+}
+
 /**
  * amdgpu_gtt_mgr_alloc - allocate new ranges
  *
@@ -95,13 +113,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct drm_mm_node *node = mem->mm_node;
 	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
 
-	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+	if (amdgpu_gtt_mgr_is_allocated(mem))
 		return 0;
 
 	if (place)
@@ -112,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	if (place && place->lpfn)
 		lpfn = place->lpfn;
 	else
-		lpfn = man->size;
+		lpfn = adev->gart.num_cpu_pages;
 
 	mode = DRM_MM_INSERT_BEST;
 	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)

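amdgpu_gtt_mgr_init() now keeps the first AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS pages of the GART out of the drm_mm pool for the move windows used by amdgpu_ttm.c; with the values from amdgpu_ttm.h below (512 pages, 2 windows) that reserves 4 MiB. The computation, as a stand-alone sketch:

	#include <stdio.h>

	#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512	/* pages per window */
	#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
	#define PAGE_SIZE			4096	/* assumed */

	int main(void)
	{
		unsigned long start = AMDGPU_GTT_MAX_TRANSFER_SIZE *
				      AMDGPU_GTT_NUM_TRANSFER_WINDOWS;

		printf("drm_mm starts at page %lu, %lu KiB reserved\n",
		       start, start * PAGE_SIZE / 1024);	/* 1024 pages, 4096 KiB */
		return 0;
	}
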
+ 4 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -130,6 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	unsigned i;
 	int r = 0;
+	bool need_pipe_sync = false;
 
 	if (num_ibs == 0)
 		return -EINVAL;
@@ -165,15 +166,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (ring->funcs->emit_pipeline_sync && job &&
 	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
-		amdgpu_ring_emit_pipeline_sync(ring);
+		need_pipe_sync = true;
 		dma_fence_put(tmp);
 	}
 
 	if (ring->funcs->insert_start)
 		ring->funcs->insert_start(ring);
 
-	if (vm) {
-		r = amdgpu_vm_flush(ring, job);
+	if (job) {
+		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;

+ 4 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c

@@ -220,6 +220,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	int r = 0;
 
 	spin_lock_init(&adev->irq.lock);
+
+	/* Disable vblank irqs aggressively for power-saving */
+	adev->ddev->vblank_disable_immediate = true;
+
 	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
 	if (r) {
 		return r;

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -81,6 +81,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
 	if (r)
 		kfree(*job);
+	else
+		(*job)->vm_pd_addr = adev->gart.table_addr;
 
 	return r;
 }

+ 11 - 8
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -485,7 +485,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		vram_gtt.vram_size -= adev->vram_pin_size;
 		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
 		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
-		vram_gtt.gtt_size  = adev->mc.gtt_size;
+		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+		vram_gtt.gtt_size *= PAGE_SIZE;
 		vram_gtt.gtt_size -= adev->gart_pin_size;
 		return copy_to_user(out, &vram_gtt,
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -510,9 +511,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		mem.cpu_accessible_vram.max_allocation =
 			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
 
-		mem.gtt.total_heap_size = adev->mc.gtt_size;
-		mem.gtt.usable_heap_size =
-			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+		mem.gtt.total_heap_size *= PAGE_SIZE;
+		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+			- adev->gart_pin_size;
 		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
 
@@ -571,8 +573,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
 		} else {
-			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
-			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
+			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
+			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
 		}
 		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
 		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
@@ -587,8 +589,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
-		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
-					     AMDGPU_GPU_PAGE_SIZE;
+		dev_info.pte_fragment_size =
+			(1 << AMDGPU_LOG2_PAGES_PER_FRAG(adev)) *
+			AMDGPU_GPU_PAGE_SIZE;
 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
 
 		dev_info.cu_active_number = adev->gfx.cu_info.number;

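Since mc.gtt_size is gone, the GTT heap reported to userspace is derived from the TTM manager, which counts pages rather than bytes. A sketch of the conversion with made-up numbers (not driver code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t man_pages = 1ull << 20;	/* hypothetical TTM_PL_TT manager size */
		uint64_t page_size = 4096;		/* assumed PAGE_SIZE */
		uint64_t gart_pin_size = 64ull << 20;	/* hypothetical pinned bytes */

		uint64_t total = man_pages * page_size;	/* 4 GiB */
		printf("gtt total %llu MiB, usable %llu MiB\n",
		       (unsigned long long)(total >> 20),
		       (unsigned long long)((total - gart_pin_size) >> 20));
		return 0;
	}
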
+ 0 - 12
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h

@@ -257,15 +257,7 @@ struct amdgpu_audio {
 	int num_pins;
 };
 
-struct amdgpu_mode_mc_save {
-	u32 vga_render_control;
-	u32 vga_hdp_control;
-	bool crtc_enabled[AMDGPU_MAX_CRTCS];
-};
-
 struct amdgpu_display_funcs {
-	/* vga render */
-	void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
 	/* display watermarks */
 	void (*bandwidth_update)(struct amdgpu_device *adev);
 	/* get frame count */
@@ -300,10 +292,6 @@ struct amdgpu_display_funcs {
 			      uint16_t connector_object_id,
 			      struct amdgpu_hpd *hpd,
 			      struct amdgpu_router *router);
-	void (*stop_mc_access)(struct amdgpu_device *adev,
-			       struct amdgpu_mode_mc_save *save);
-	void (*resume_mc_access)(struct amdgpu_device *adev,
-				 struct amdgpu_mode_mc_save *save);
 };
 
 struct amdgpu_mode_info {

+ 41 - 29
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -93,6 +93,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	bo = container_of(tbo, struct amdgpu_bo, tbo);
 
+	amdgpu_bo_kunmap(bo);
 	amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
 
 	drm_gem_object_release(&bo->gem_base);
@@ -322,7 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
 	unsigned long page_align;
-	u64 initial_bytes_moved;
+	u64 initial_bytes_moved, bytes_moved;
 	size_t acc_size;
 	int r;
 
@@ -398,8 +399,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, !kernel, NULL,
 				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
-	amdgpu_cs_report_moved_bytes(adev,
-		atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+		      initial_bytes_moved;
+	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+		amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+	else
+		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
 
 	if (unlikely(r != 0))
 		return r;
@@ -426,6 +433,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 
 	trace_amdgpu_bo_create(bo);
 
+	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
+	if (type == ttm_bo_type_device)
+		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
 	return 0;
 
 fail_unreserve:
@@ -535,7 +546,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 
 	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
 			       amdgpu_bo_size(bo), resv, fence,
-			       direct);
+			       direct, false);
 	if (!r)
 		amdgpu_bo_fence(bo, *fence, true);
 
@@ -588,7 +599,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 
 	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
 			       amdgpu_bo_size(bo), resv, fence,
-			       direct);
+			       direct, false);
 	if (!r)
 		amdgpu_bo_fence(bo, *fence, true);
 
@@ -724,15 +735,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
 	}
-	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-	if (unlikely(r)) {
-		dev_err(adev->dev, "%p bind failed\n", bo);
-		goto error;
-	}
 
 	bo->pin_count = 1;
-	if (gpu_addr != NULL)
+	if (gpu_addr != NULL) {
+		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+		if (unlikely(r)) {
+			dev_err(adev->dev, "%p bind failed\n", bo);
+			goto error;
+		}
 		*gpu_addr = amdgpu_bo_gpu_offset(bo);
+	}
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
 		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -921,6 +933,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	abo = container_of(bo, struct amdgpu_bo, tbo);
 	amdgpu_vm_bo_invalidate(adev, abo);
 
+	amdgpu_bo_kunmap(abo);
+
 	/* remember the eviction */
 	if (evict)
 		atomic64_inc(&adev->num_evictions);
@@ -939,19 +953,22 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	unsigned long offset, size, lpfn;
-	int i, r;
+	unsigned long offset, size;
+	int r;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
 		return 0;
 
 	abo = container_of(bo, struct amdgpu_bo, tbo);
+
+	/* Remember that this BO was accessed by the CPU */
+	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
 	if (bo->mem.mem_type != TTM_PL_VRAM)
 		return 0;
 
 	size = bo->mem.num_pages << PAGE_SHIFT;
 	offset = bo->mem.start << PAGE_SHIFT;
-	/* TODO: figure out how to map scattered VRAM to the CPU */
 	if ((offset + size) <= adev->mc.visible_vram_size)
 		return 0;
 
@@ -961,26 +978,21 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* hurrah the memory is not visible ! */
 	atomic64_inc(&adev->num_vram_cpu_page_faults);
-	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
-	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
-	for (i = 0; i < abo->placement.num_placement; i++) {
-		/* Force into visible VRAM */
-		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
-		    (!abo->placements[i].lpfn ||
-		     abo->placements[i].lpfn > lpfn))
-			abo->placements[i].lpfn = lpfn;
-	}
+	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+					 AMDGPU_GEM_DOMAIN_GTT);
+
+	/* Avoid costly evictions; only set GTT as a busy placement */
+	abo->placement.num_busy_placement = 1;
+	abo->placement.busy_placement = &abo->placements[1];
+
 	r = ttm_bo_validate(bo, &abo->placement, false, false);
-	if (unlikely(r == -ENOMEM)) {
-		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &abo->placement, false, false);
-	} else if (unlikely(r != 0)) {
+	if (unlikely(r != 0))
 		return r;
-	}
 
 	offset = bo->mem.start << PAGE_SHIFT;
 	/* this should never happen */
-	if ((offset + size) > adev->mc.visible_vram_size)
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    (offset + size) > adev->mc.visible_vram_size)
 		return -EINVAL;
 
 	return 0;

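The fault handler above only migrates a BO when it genuinely ends past the CPU-visible window; the test is plain interval arithmetic. A stand-alone sketch assuming 4 KiB pages (hypothetical helper, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical helper modeled on the visibility test in
	 * amdgpu_bo_fault_reserve_notify(); 4 KiB pages assumed.
	 */
	static bool fits_visible(unsigned long long start_page,
				 unsigned long long num_pages,
				 unsigned long long visible_vram_size)
	{
		unsigned long long offset = start_page << 12;
		unsigned long long size = num_pages << 12;

		return offset + size <= visible_vram_size;
	}

	int main(void)
	{
		/* BO at 255 MiB, 2 MiB long, 256 MiB visible window: not visible */
		printf("%d\n", fits_visible((255ull << 20) >> 12,
					    (2ull << 20) >> 12, 256ull << 20));
		return 0;
	}
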
+ 5 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -120,7 +120,11 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
  */
 static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
 {
-	return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
+	switch (bo->tbo.mem.mem_type) {
+	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+	case TTM_PL_VRAM: return true;
+	default: return false;
+	}
 }
 
 int amdgpu_bo_create(struct amdgpu_device *adev,

+ 31 - 32
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c

@@ -63,8 +63,13 @@ static int psp_sw_init(void *handle)
 		psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
 		break;
 	case CHIP_RAVEN:
+#if 0
+		psp->init_microcode = psp_v10_0_init_microcode;
+#endif
 		psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
 		psp->ring_init = psp_v10_0_ring_init;
+		psp->ring_create = psp_v10_0_ring_create;
+		psp->ring_destroy = psp_v10_0_ring_destroy;
 		psp->cmd_submit = psp_v10_0_cmd_submit;
 		psp->compare_sram_data = psp_v10_0_compare_sram_data;
 		break;
@@ -95,9 +100,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
 	int i;
 	struct amdgpu_device *adev = psp->adev;
 
-	val = RREG32(reg_index);
-
 	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32(reg_index);
 		if (check_changed) {
 			if (val != reg_val)
 				return 0;
@@ -118,33 +122,18 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		   int index)
 {
 	int ret;
-	struct amdgpu_bo *cmd_buf_bo;
-	uint64_t cmd_buf_mc_addr;
-	struct psp_gfx_cmd_resp *cmd_buf_mem;
-	struct amdgpu_device *adev = psp->adev;
-
-	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &cmd_buf_bo, &cmd_buf_mc_addr,
-				      (void **)&cmd_buf_mem);
-	if (ret)
-		return ret;
 
-	memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
+	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
-	memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
 
-	ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr,
+	ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
 			     fence_mc_addr, index);
 
 	while (*((unsigned int *)psp->fence_buf) != index) {
 		msleep(1);
 	}
 
-	amdgpu_bo_free_kernel(&cmd_buf_bo,
-			      &cmd_buf_mc_addr,
-			      (void **)&cmd_buf_mem);
-
 	return ret;
 }
 
@@ -351,6 +340,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
 				      &psp->fence_buf_bo,
 				      &psp->fence_buf_mc_addr,
 				      &psp->fence_buf);
+	if (ret)
+		goto failed_mem2;
+
+	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+				      (void **)&psp->cmd_buf_mem);
 	if (ret)
 		goto failed_mem1;
 
@@ -358,7 +354,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
 
 	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
 	if (ret)
-		goto failed_mem1;
+		goto failed_mem;
 
 	ret = psp_tmr_init(psp);
 	if (ret)
@@ -379,9 +375,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	return 0;
 
 failed_mem:
+	amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
+			      &psp->cmd_buf_mc_addr,
+			      (void **)&psp->cmd_buf_mem);
+failed_mem1:
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem1:
+failed_mem2:
 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 failed:
@@ -435,16 +435,15 @@ static int psp_hw_fini(void *handle)
 
 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
-	if (psp->tmr_buf)
-		amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
-
-	if (psp->fw_pri_buf)
-		amdgpu_bo_free_kernel(&psp->fw_pri_bo,
-				      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-
-	if (psp->fence_buf_bo)
-		amdgpu_bo_free_kernel(&psp->fence_buf_bo,
-				      &psp->fence_buf_mc_addr, &psp->fence_buf);
+	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+			      &psp->fence_buf_mc_addr, &psp->fence_buf);
+	amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
+			      &psp->asd_shared_buf);
+	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+			      (void **)&psp->cmd_buf_mem);
 
 	kfree(psp->cmd);
 	psp->cmd = NULL;

+ 5 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h

@@ -108,6 +108,11 @@ struct psp_context
 	struct amdgpu_bo 		*fence_buf_bo;
 	uint64_t 			fence_buf_mc_addr;
 	void				*fence_buf;
+
+	/* cmd buffer */
+	struct amdgpu_bo		*cmd_buf_bo;
+	uint64_t			cmd_buf_mc_addr;
+	struct psp_gfx_cmd_resp		*cmd_buf_mem;
 };
 
 struct amdgpu_psp_funcs {

+ 18 - 8
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -212,10 +212,19 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 
 	}
 
-	r = amdgpu_wb_get(adev, &ring->fence_offs);
-	if (r) {
-		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
-		return r;
+	if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
+		r = amdgpu_wb_get_256Bit(adev, &ring->fence_offs);
+		if (r) {
+			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+			return r;
+		}
+
+	} else {
+		r = amdgpu_wb_get(adev, &ring->fence_offs);
+		if (r) {
+			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+			return r;
+		}
 	}
 
 	r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
@@ -278,17 +287,18 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 	ring->ready = false;
 
 	if (ring->funcs->support_64bit_ptrs) {
-		amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs);
-		amdgpu_wb_free_64bit(ring->adev, ring->fence_offs);
 		amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
 		amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
 	} else {
-		amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
-		amdgpu_wb_free(ring->adev, ring->fence_offs);
 		amdgpu_wb_free(ring->adev, ring->rptr_offs);
 		amdgpu_wb_free(ring->adev, ring->wptr_offs);
 	}
 
+	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
+	if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+		amdgpu_wb_free_256bit(ring->adev, ring->fence_offs);
+	else
+		amdgpu_wb_free(ring->adev, ring->fence_offs);
 
 	amdgpu_bo_free_kernel(&ring->ring_obj,
 			      &ring->gpu_addr,

+ 40 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

@@ -212,4 +212,44 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 
 }
 
+static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
+{
+	if (ring->count_dw <= 0)
+		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+	ring->ring[ring->wptr++ & ring->buf_mask] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+}
+
+static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
+					      void *src, int count_dw)
+{
+	unsigned occupied, chunk1, chunk2;
+	void *dst;
+
+	if (unlikely(ring->count_dw < count_dw))
+		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
+	occupied = ring->wptr & ring->buf_mask;
+	dst = (void *)&ring->ring[occupied];
+	chunk1 = ring->buf_mask + 1 - occupied;
+	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
+	chunk2 = count_dw - chunk1;
+	chunk1 <<= 2;
+	chunk2 <<= 2;
+
+	if (chunk1)
+		memcpy(dst, src, chunk1);
+
+	if (chunk2) {
+		src += chunk1;
+		dst = (void *)ring->ring;
+		memcpy(dst, src, chunk2);
+	}
+
+	ring->wptr += count_dw;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw -= count_dw;
+}
+
 #endif

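amdgpu_ring_write_multiple() splits a copy in two when it would run past the end of the ring buffer. A stand-alone sketch of the same wrap-around logic on a tiny 8-dword ring (hypothetical helper, not driver code):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define RING_DW  8			/* ring size in dwords, power of two */
	#define BUF_MASK (RING_DW - 1)

	static uint32_t ring[RING_DW];

	/* Hypothetical stand-in for amdgpu_ring_write_multiple(). */
	static void write_multiple(uint32_t *wptr, const uint32_t *src, unsigned count_dw)
	{
		unsigned occupied = *wptr & BUF_MASK;
		unsigned chunk1 = RING_DW - occupied;	/* dwords until the end */

		if (chunk1 > count_dw)
			chunk1 = count_dw;

		memcpy(&ring[occupied], src, chunk1 * 4);
		if (count_dw > chunk1)			/* wrapped remainder */
			memcpy(ring, src + chunk1, (count_dw - chunk1) * 4);

		*wptr += count_dw;
	}

	int main(void)
	{
		uint32_t wptr = 6;			/* two dwords from the end */
		uint32_t src[4] = { 1, 2, 3, 4 };

		write_multiple(&wptr, src, 4);		/* copies 2 + 2 dwords */
		printf("ring[6]=%u ring[7]=%u ring[0]=%u ring[1]=%u\n",
		       ring[6], ring[7], ring[0], ring[1]);
		return 0;
	}
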
+ 26 - 26
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c

@@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_bo *vram_obj = NULL;
 	struct amdgpu_bo **gtt_obj = NULL;
-	uint64_t gtt_addr, vram_addr;
+	uint64_t gart_addr, vram_addr;
 	unsigned n, size;
 	int i, r;
 
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	/* Number of tests =
 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
 	 */
-	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+	n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		if (adev->rings[i])
 			n -= adev->rings[i]->ring_size;
@@ -76,7 +76,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	}
 	for (i = 0; i < n; i++) {
 		void *gtt_map, *vram_map;
-		void **gtt_start, **gtt_end;
+		void **gart_start, **gart_end;
 		void **vram_start, **vram_end;
 		struct dma_fence *fence = NULL;
 
@@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		r = amdgpu_bo_reserve(gtt_obj[i], false);
 		if (unlikely(r != 0))
 			goto out_lclean_unref;
-		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_lclean_unres;
@@ -103,15 +103,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++)
-			*gtt_start = gtt_start;
+		for (gart_start = gtt_map, gart_end = gtt_map + size;
+		     gart_start < gart_end;
+		     gart_start++)
+			*gart_start = gart_start;
 
 		amdgpu_bo_kunmap(gtt_obj[i]);
 
-		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-				       size, NULL, &fence, false);
+		r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
+				       size, NULL, &fence, false, false);
 
 		if (r) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (gart_start = gtt_map, gart_end = gtt_map + size,
 		     vram_start = vram_map, vram_end = vram_map + size;
 		     vram_start < vram_end;
-		     gtt_start++, vram_start++) {
-			if (*vram_start != gtt_start) {
+		     gart_start++, vram_start++) {
+			if (*vram_start != gart_start) {
 				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
 					  "expected 0x%p (GTT/VRAM offset "
 					  "0x%16llx/0x%16llx)\n",
-					  i, *vram_start, gtt_start,
+					  i, *vram_start, gart_start,
 					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
-					   (void*)gtt_start - gtt_map),
+					  (gart_addr - adev->mc.gart_start +
+					   (void*)gart_start - gtt_map),
 					  (unsigned long long)
 					  (vram_addr - adev->mc.vram_start +
-					   (void*)gtt_start - gtt_map));
+					   (void*)gart_start - gtt_map));
 				amdgpu_bo_kunmap(vram_obj);
 				goto out_lclean_unpin;
 			}
@@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
 		amdgpu_bo_kunmap(vram_obj);
 
-		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-				       size, NULL, &fence, false);
+		r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
+				       size, NULL, &fence, false, false);
 
 		if (r) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
@@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (gart_start = gtt_map, gart_end = gtt_map + size,
 		     vram_start = vram_map, vram_end = vram_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++, vram_start++) {
-			if (*gtt_start != vram_start) {
+		     gart_start < gart_end;
+		     gart_start++, vram_start++) {
+			if (*gart_start != vram_start) {
 				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
 					  "expected 0x%p (VRAM/GTT offset "
 					  "0x%16llx/0x%16llx)\n",
-					  i, *gtt_start, vram_start,
+					  i, *gart_start, vram_start,
 					  (unsigned long long)
 					  (vram_addr - adev->mc.vram_start +
 					   (void*)vram_start - vram_map),
 					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
+					  (gart_addr - adev->mc.gart_start +
 					   (void*)vram_start - vram_map));
 				amdgpu_bo_kunmap(gtt_obj[i]);
 				goto out_lclean_unpin;
@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		amdgpu_bo_kunmap(gtt_obj[i]);
 
 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
-			 gtt_addr - adev->mc.gtt_start);
+			 gart_addr - adev->mc.gart_start);
 		continue;
 
 out_lclean_unpin:

+ 9 - 9
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

@@ -224,7 +224,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
 			     __field(long, start)
 			     __field(long, last)
 			     __field(u64, offset)
-			     __field(u32, flags)
+			     __field(u64, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -234,7 +234,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
 			   __entry->offset = mapping->offset;
 			   __entry->flags = mapping->flags;
 			   ),
-	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
 		      __entry->bo, __entry->start, __entry->last,
 		      __entry->offset, __entry->flags)
 );
@@ -248,7 +248,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 			     __field(long, start)
 			     __field(long, last)
 			     __field(u64, offset)
-			     __field(u32, flags)
+			     __field(u64, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -258,7 +258,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 			   __entry->offset = mapping->offset;
 			   __entry->flags = mapping->flags;
 			   ),
-	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
 		      __entry->bo, __entry->start, __entry->last,
 		      __entry->offset, __entry->flags)
 );
@@ -269,7 +269,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
 	    TP_STRUCT__entry(
 			     __field(u64, soffset)
 			     __field(u64, eoffset)
-			     __field(u32, flags)
+			     __field(u64, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -277,7 +277,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
 			   __entry->eoffset = mapping->last + 1;
 			   __entry->flags = mapping->flags;
 			   ),
-	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
 		      __entry->soffset, __entry->eoffset, __entry->flags)
 );
 
@@ -293,14 +293,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
 
 TRACE_EVENT(amdgpu_vm_set_ptes,
 	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags),
+		     uint32_t incr, uint64_t flags),
 	    TP_ARGS(pe, addr, count, incr, flags),
 	    TP_STRUCT__entry(
 			     __field(u64, pe)
 			     __field(u64, addr)
 			     __field(u32, count)
 			     __field(u32, incr)
-			     __field(u32, flags)
+			     __field(u64, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -310,7 +310,7 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
 			   __entry->incr = incr;
 			   __entry->flags = flags;
 			   ),
-	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
 		      __entry->pe, __entry->addr, __entry->incr,
 		      __entry->flags, __entry->count)
 );

+ 276 - 92
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -47,10 +47,15 @@
 
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *mem, unsigned num_pages,
+			     uint64_t offset, unsigned window,
+			     struct amdgpu_ring *ring,
+			     uint64_t *addr);
+
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-
 /*
  * Global memory.
  */
@@ -97,6 +102,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 		goto error_bo;
 	}
 
+	mutex_init(&adev->mman.gtt_window_lock);
+
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
@@ -123,6 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 	if (adev->mman.mem_global_referenced) {
 		amd_sched_entity_fini(adev->mman.entity.sched,
 				      &adev->mman.entity);
+		mutex_destroy(&adev->mman.gtt_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 		drm_global_item_unref(&adev->mman.mem_global_ref);
 		adev->mman.mem_global_referenced = false;
@@ -150,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_TT:
 		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = adev->mc.gtt_start;
+		man->gpu_offset = adev->mc.gart_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -186,12 +194,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	static struct ttm_place placements = {
+	static const struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
 		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 	};
-	unsigned i;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
 		placement->placement = &placements;
@@ -207,22 +214,36 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		    adev->mman.buffer_funcs_ring &&
 		    adev->mman.buffer_funcs_ring->ready == false) {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+		} else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+			unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+			struct drm_mm_node *node = bo->mem.mm_node;
+			unsigned long pages_left;
+
+			for (pages_left = bo->mem.num_pages;
+			     pages_left;
+			     pages_left -= node->size, node++) {
+				if (node->start < fpfn)
+					break;
+			}
+
+			if (!pages_left)
+				goto gtt;
+
+			/* Try evicting to the CPU inaccessible part of VRAM
+			 * first, but only set GTT as busy placement, so this
+			 * BO will be evicted to GTT rather than causing other
+			 * BOs to be evicted from VRAM
+			 */
+			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+							 AMDGPU_GEM_DOMAIN_GTT);
+			abo->placements[0].fpfn = fpfn;
+			abo->placements[0].lpfn = 0;
+			abo->placement.busy_placement = &abo->placements[1];
+			abo->placement.num_busy_placement = 1;
 		} else {
+gtt:
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
-			for (i = 0; i < abo->placement.num_placement; ++i) {
-				if (!(abo->placements[i].flags &
-				      TTM_PL_FLAG_TT))
-					continue;
-
-				if (abo->placements[i].lpfn)
-					continue;
-
-				/* set an upper limit to force directly
-				 * allocating address space for the BO.
-				 */
-				abo->placements[i].lpfn =
-					adev->mc.gtt_size >> PAGE_SHIFT;
-			}
 		}
 		break;
 	case TTM_PL_TT:
@@ -252,29 +273,18 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
-static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
-			       struct drm_mm_node *mm_node,
-			       struct ttm_mem_reg *mem,
-			       uint64_t *addr)
+static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+				    struct drm_mm_node *mm_node,
+				    struct ttm_mem_reg *mem)
 {
-	int r;
-
-	switch (mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, mem);
-		if (r)
-			return r;
+	uint64_t addr = 0;
 
-	case TTM_PL_VRAM:
-		*addr = mm_node->start << PAGE_SHIFT;
-		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
-		return -EINVAL;
+	if (mem->mem_type != TTM_PL_TT ||
+	    amdgpu_gtt_mgr_is_allocated(mem)) {
+		addr = mm_node->start << PAGE_SHIFT;
+		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 	}
-
-	return 0;
+	return addr;
 }
 
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
@@ -299,26 +309,40 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	}
 
 	old_mm = old_mem->mm_node;
-	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
-	if (r)
-		return r;
 	old_size = old_mm->size;
-
+	old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
 
 	new_mm = new_mem->mm_node;
-	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
-	if (r)
-		return r;
 	new_size = new_mm->size;
+	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
 
 	num_pages = new_mem->num_pages;
+	mutex_lock(&adev->mman.gtt_window_lock);
 	while (num_pages) {
-		unsigned long cur_pages = min(old_size, new_size);
+		unsigned long cur_pages = min(min(old_size, new_size),
+					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
+		uint64_t from = old_start, to = new_start;
 		struct dma_fence *next;
 
-		r = amdgpu_copy_buffer(ring, old_start, new_start,
+		if (old_mem->mem_type == TTM_PL_TT &&
+		    !amdgpu_gtt_mgr_is_allocated(old_mem)) {
+			r = amdgpu_map_buffer(bo, old_mem, cur_pages,
+					      old_start, 0, ring, &from);
+			if (r)
+				goto error;
+		}
+
+		if (new_mem->mem_type == TTM_PL_TT &&
+		    !amdgpu_gtt_mgr_is_allocated(new_mem)) {
+			r = amdgpu_map_buffer(bo, new_mem, cur_pages,
+					      new_start, 1, ring, &to);
+			if (r)
+				goto error;
+		}
+
+		r = amdgpu_copy_buffer(ring, from, to,
 				       cur_pages * PAGE_SIZE,
-				       bo->resv, &next, false);
+				       bo->resv, &next, false, true);
 		if (r)
 			goto error;
 
@@ -331,10 +355,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 		old_size -= cur_pages;
 		if (!old_size) {
-			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
-						&old_start);
-			if (r)
-				goto error;
+			old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
 			old_size = old_mm->size;
 		} else {
 			old_start += cur_pages * PAGE_SIZE;
@@ -342,22 +363,21 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 		new_size -= cur_pages;
 		if (!new_size) {
-			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
-						&new_start);
-			if (r)
-				goto error;
-
+			new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
 			new_size = new_mm->size;
 		} else {
 			new_start += cur_pages * PAGE_SIZE;
 		}
 	}
+	mutex_unlock(&adev->mman.gtt_window_lock);
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	dma_fence_put(fence);
 	return r;
 
 error:
+	mutex_unlock(&adev->mman.gtt_window_lock);
+
 	if (fence)
 		dma_fence_wait(fence, false);
 	dma_fence_put(fence);
@@ -384,7 +404,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
@@ -431,7 +451,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
@@ -507,6 +527,15 @@ memcpy:
 		}
 	}
 
+	if (bo->type == ttm_bo_type_device &&
+	    new_mem->mem_type == TTM_PL_VRAM &&
+	    old_mem->mem_type != TTM_PL_VRAM) {
+		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+		 * accesses the BO after it's moved.
+		 */
+		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+	}
+
 	/* update statistics */
 	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
 	return 0;
@@ -695,6 +724,31 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);
 }
 
+static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	uint64_t flags;
+	int r;
+
+	spin_lock(&gtt->adev->gtt_list_lock);
+	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
+	gtt->offset = (u64)mem->start << PAGE_SHIFT;
+	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+		ttm->pages, gtt->ttm.dma_address, flags);
+
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  ttm->num_pages, gtt->offset);
+		goto error_gart_bind;
+	}
+
+	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+	spin_unlock(&gtt->adev->gtt_list_lock);
+	return r;
+}
+
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -718,7 +772,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	return 0;
+	if (amdgpu_gtt_mgr_is_allocated(bo_mem))
+		r = amdgpu_ttm_do_bind(ttm, bo_mem);
+
+	return r;
 }
 
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
@@ -731,8 +788,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
-	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	uint64_t flags;
 	int r;
 
 	if (!ttm || amdgpu_ttm_is_bound(ttm))
@@ -745,22 +800,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 		return r;
 	}
 
-	spin_lock(&gtt->adev->gtt_list_lock);
-	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
-	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
-		ttm->pages, gtt->ttm.dma_address, flags);
-
-	if (r) {
-		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-			  ttm->num_pages, gtt->offset);
-		goto error_gart_bind;
-	}
-
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
-	return r;
+	return amdgpu_ttm_do_bind(ttm, bo_mem);
 }
 
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
@@ -1075,6 +1115,67 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	return ttm_bo_eviction_valuable(bo, place);
 }
 
+static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+				    unsigned long offset,
+				    void *buf, int len, int write)
+{
+	struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+	struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+	uint32_t value = 0;
+	int ret = 0;
+	uint64_t pos;
+	unsigned long flags;
+
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return -EIO;
+
+	while (offset >= (nodes->size << PAGE_SHIFT)) {
+		offset -= nodes->size << PAGE_SHIFT;
+		++nodes;
+	}
+	pos = (nodes->start << PAGE_SHIFT) + offset;
+
+	while (len && pos < adev->mc.mc_vram_size) {
+		uint64_t aligned_pos = pos & ~(uint64_t)3;
+		uint32_t bytes = 4 - (pos & 3);
+		uint32_t shift = (pos & 3) * 8;
+		uint32_t mask = 0xffffffff << shift;
+
+		if (len < bytes) {
+			mask &= 0xffffffff >> (bytes - len) * 8;
+			bytes = len;
+		}
+
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+		WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+		if (!write || mask != 0xffffffff)
+			value = RREG32(mmMM_DATA);
+		if (write) {
+			value &= ~mask;
+			value |= (*(uint32_t *)buf << shift) & mask;
+			WREG32(mmMM_DATA, value);
+		}
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+		if (!write) {
+			value = (value & mask) >> shift;
+			memcpy(buf, &value, bytes);
+		}
+
+		ret += bytes;
+		buf = (uint8_t *)buf + bytes;
+		pos += bytes;
+		len -= bytes;
+		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
+			++nodes;
+			pos = (nodes->start << PAGE_SHIFT);
+		}
+	}
+
+	return ret;
+}
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@@ -1090,11 +1191,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_free = &amdgpu_ttm_io_mem_free,
 	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
+	.access_memory = &amdgpu_ttm_access_memory
 };
 
 int amdgpu_ttm_init(struct amdgpu_device *adev)
 {
+	uint64_t gtt_size;
 	int r;
+	u64 vis_vram_limit;
 
 	r = amdgpu_ttm_global_init(adev);
 	if (r) {
@@ -1118,6 +1222,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
+
+	/* Reduce size of CPU-visible VRAM if requested */
+	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
+	if (amdgpu_vis_vram_limit > 0 &&
+	    vis_vram_limit <= adev->mc.visible_vram_size)
+		adev->mc.visible_vram_size = vis_vram_limit;
+
 	/* Change the size here instead of the init above so only lpfn is affected */
 	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
@@ -1140,14 +1251,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	}
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
-	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
-				adev->mc.gtt_size >> PAGE_SHIFT);
+
+	if (amdgpu_gtt_size == -1)
+		gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+			       adev->mc.mc_vram_size);
+	else
+		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
 	}
 	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
-		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+		 (unsigned)(gtt_size / (1024 * 1024)));
 
 	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
 	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
@@ -1256,12 +1372,77 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *mem, unsigned num_pages,
+			     uint64_t offset, unsigned window,
+			     struct amdgpu_ring *ring,
+			     uint64_t *addr)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+	struct amdgpu_device *adev = ring->adev;
+	struct ttm_tt *ttm = bo->ttm;
+	struct amdgpu_job *job;
+	unsigned num_dw, num_bytes;
+	dma_addr_t *dma_address;
+	struct dma_fence *fence;
+	uint64_t src_addr, dst_addr;
+	uint64_t flags;
+	int r;
+
+	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+	*addr = adev->mc.gart_start;
+	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+		AMDGPU_GPU_PAGE_SIZE;
+
+	num_dw = adev->mman.buffer_funcs->copy_num_dw;
+	while (num_dw & 0x7)
+		num_dw++;
+
+	num_bytes = num_pages * 8;
+
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+	if (r)
+		return r;
+
+	src_addr = num_dw * 4;
+	src_addr += job->ibs[0].gpu_addr;
+
+	dst_addr = adev->gart.table_addr;
+	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+				dst_addr, num_bytes);
+
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
+
+	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
+	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+			    &job->ibs[0].ptr[num_dw]);
+	if (r)
+		goto error_free;
+
+	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+	if (r)
+		goto error_free;
+
+	dma_fence_put(fence);
+
+	return r;
+
+error_free:
+	amdgpu_job_free(job);
+	return r;
+}
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct dma_fence **fence, bool direct_submit)
+		       struct dma_fence **fence, bool direct_submit,
+		       bool vm_needs_flush)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1283,6 +1464,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 	if (r)
 		return r;
 
+	job->vm_needs_flush = vm_needs_flush;
 	if (resv) {
 		r = amdgpu_sync_resv(adev, &job->sync, resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED);
@@ -1347,6 +1529,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		return -EINVAL;
 	}
 
+	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+		if (r)
+			return r;
+	}
+
 	num_pages = bo->tbo.num_pages;
 	mm_node = bo->tbo.mem.mm_node;
 	num_loops = 0;
@@ -1382,11 +1570,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
 		uint64_t dst_addr;
 
-		r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
-					&bo->tbo.mem, &dst_addr);
-		if (r)
-			return r;
-
+		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
 		while (byte_count) {
 			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
@@ -1574,7 +1758,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 				  adev, &amdgpu_ttm_gtt_fops);
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->mc.gtt_size);
+	i_size_write(ent->d_inode, adev->mc.gart_size);
 	adev->mman.gtt = ent;
 
 #endif

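amdgpu_ttm_access_memory() tunnels through the dword-wide MM_INDEX/MM_DATA window, so unaligned accesses become a masked read-modify-write. A stand-alone sketch of the mask and shift arithmetic (assumes little-endian; not driver code):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t vram_word = 0xaabbccdd;	/* stand-in for RREG32(mmMM_DATA) */
		uint64_t pos = 0x1001;			/* unaligned byte position */
		uint8_t buf[2] = { 0x11, 0x22 };	/* bytes to write */
		int len = sizeof(buf);

		uint32_t bytes = 4 - (pos & 3);		/* bytes left in this dword: 3 */
		uint32_t shift = (pos & 3) * 8;		/* bit offset: 8 */
		uint32_t mask = 0xffffffffu << shift;

		if (len < (int)bytes) {
			mask &= 0xffffffffu >> (bytes - len) * 8;
			bytes = len;
		}

		uint32_t src = 0;
		memcpy(&src, buf, bytes);		/* little-endian assumed */
		vram_word = (vram_word & ~mask) | ((src << shift) & mask);

		printf("0x%08x\n", vram_word);		/* 0xaa2211dd */
		return 0;
	}
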
+ 11 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -34,6 +34,9 @@
 #define AMDGPU_PL_FLAG_GWS		(TTM_PL_FLAG_PRIV << 1)
 #define AMDGPU_PL_FLAG_OA		(TTM_PL_FLAG_PRIV << 2)
 
+#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
+#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
+
 struct amdgpu_mman {
 	struct ttm_bo_global_ref        bo_global_ref;
 	struct drm_global_reference	mem_global_ref;
@@ -49,6 +52,8 @@ struct amdgpu_mman {
 	/* buffer handling */
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
+
+	struct mutex				gtt_window_lock;
 	/* Scheduler entity for buffer moves */
 	struct amd_sched_entity			entity;
 };
@@ -56,17 +61,17 @@ struct amdgpu_mman {
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
 int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *tbo,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem);
 
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct dma_fence **fence, bool direct_submit);
+		       struct dma_fence **fence, bool direct_submit,
+		       bool vm_needs_flush);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			uint32_t src_data,
 			struct reservation_object *resv,
@@ -75,5 +80,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 #endif

+ 9 - 5
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c

@@ -275,14 +275,10 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 		else
 			return AMDGPU_FW_LOAD_PSP;
 	case CHIP_RAVEN:
-#if 0
-		if (!load_type)
+		if (load_type != 2)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else
 			return AMDGPU_FW_LOAD_PSP;
-#else
-		return AMDGPU_FW_LOAD_DIRECT;
-#endif
 	default:
 		DRM_ERROR("Unknown firmware load type\n");
 	}
@@ -377,6 +373,11 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	struct amdgpu_firmware_info *ucode = NULL;
 	const struct common_firmware_header *header = NULL;
 
+	if (!adev->firmware.fw_size) {
+		dev_warn(adev->dev, "No IP firmware to load\n");
+		return 0;
+	}
+
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
 				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 				AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
@@ -459,6 +460,9 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
 	int i;
 	struct amdgpu_firmware_info *ucode = NULL;
 
+	if (!adev->firmware.fw_size)
+		return 0;
+
 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
 		ucode = &adev->firmware.ucode[i];
 		if (ucode->fw) {

+ 85 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c

@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_vf_error.h"
+#include "mxgpu_ai.h"
+
+#define AMDGPU_VF_ERROR_ENTRY_SIZE    16
+
+/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */
+struct amdgpu_vf_error_buffer {
+	int read_count;
+	int write_count;
+	uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+	uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+	uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
+struct amdgpu_vf_error_buffer admgpu_vf_errors;
+
+
+void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
+{
+	int index;
+	uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+	index = amdgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+	amdgpu_vf_errors.code[index] = error_code;
+	amdgpu_vf_errors.flags[index] = error_flags;
+	amdgpu_vf_errors.data[index] = error_data;
+	amdgpu_vf_errors.write_count++;
+}
+
+
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
+{
+	/* u32 pf2vf_flags = 0; */
+	u32 data1, data2, data3;
+	int index;
+
+	if (!adev || !amdgpu_sriov_vf(adev) || !adev->virt.ops ||
+	    !adev->virt.ops->trans_msg)
+		return;
+/*
+	TODO: enable this code once pf2vf_info support is merged
+	AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &pf2vf_flags);
+	if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) {
+		return;
+	}
+*/
+	/* The log is a ring buffer; if the writer has lapped the reader,
+	 * advance read_count so only the newest entries are sent. */
+	if (amdgpu_vf_errors.write_count - amdgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+		amdgpu_vf_errors.read_count = amdgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+	}
+
+	while (amdgpu_vf_errors.read_count < amdgpu_vf_errors.write_count) {
+		index = amdgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+		data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(amdgpu_vf_errors.code[index], amdgpu_vf_errors.flags[index]);
+		data2 = amdgpu_vf_errors.data[index] & 0xFFFFFFFF;
+		data3 = (amdgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+
+		adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
+		amdgpu_vf_errors.read_count++;
+	}
+}
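The new file keeps VF errors in a fixed-size ring indexed by two monotonically increasing counters, so writes never block and an overflow simply drops the oldest entries. A self-contained sketch of the same bookkeeping (the names and the printf stand-in for trans_msg are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 16			/* mirrors AMDGPU_VF_ERROR_ENTRY_SIZE */

struct err_ring {
	int read_count;			/* next entry to drain */
	int write_count;		/* next entry to fill */
	uint16_t code[ENTRY_SIZE];
};

static void ring_put(struct err_ring *r, uint16_t code)
{
	/* never blocks; the oldest entry is silently overwritten */
	r->code[r->write_count % ENTRY_SIZE] = code;
	r->write_count++;
}

static void ring_drain(struct err_ring *r)
{
	/* writer lapped the reader: jump to the oldest surviving entry */
	if (r->write_count - r->read_count > ENTRY_SIZE)
		r->read_count = r->write_count - ENTRY_SIZE;

	while (r->read_count < r->write_count)
		printf("vf error 0x%04x\n",
		       r->code[r->read_count++ % ENTRY_SIZE]);
}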

+ 62 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VF_ERROR_H__
+#define __VF_ERROR_H__
+
+#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c, f)    ((((c) & 0xFFFF) << 16) | ((f) & 0xFFFF))
+#define AMDGIM_ERROR_CODE(t, c)                     ((((t) & 0xF) << 12) | ((c) & 0xFFF))
+
+/* Keep this enum in sync with the AMD GIM driver. */
+enum AMDGIM_ERROR_VF {
+	AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0,
+	AMDGIM_ERROR_VF_NO_VBIOS,
+	AMDGIM_ERROR_VF_GPU_POST_ERROR,
+	AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL,
+	AMDGIM_ERROR_VF_FENCE_INIT_FAIL,
+
+	AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL,
+	AMDGIM_ERROR_VF_IB_INIT_FAIL,
+	AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL,
+	AMDGIM_ERROR_VF_ASIC_RESUME_FAIL,
+	AMDGIM_ERROR_VF_GPU_RESET_FAIL,
+
+	AMDGIM_ERROR_VF_TEST,
+	AMDGIM_ERROR_VF_MAX
+};
+
+enum AMDGIM_ERROR_CATEGORY {
+	AMDGIM_ERROR_CATEGORY_NON_USED = 0,
+	AMDGIM_ERROR_CATEGORY_GIM,
+	AMDGIM_ERROR_CATEGORY_PF,
+	AMDGIM_ERROR_CATEGORY_VF,
+	AMDGIM_ERROR_CATEGORY_VBIOS,
+	AMDGIM_ERROR_CATEGORY_MONITOR,
+
+	AMDGIM_ERROR_CATEGORY_MAX
+};
+
+void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev);
+
+#endif /* __VF_ERROR_H__ */
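The two macros pack a 4-bit category and a 12-bit sub-code into a 16-bit error code, then pair that code with 16 bits of flags in a single 32-bit mailbox word. A worked example with values taken from the enums above:

#include <assert.h>
#include <stdint.h>

#define ERROR_CODE(t, c)	 ((((t) & 0xF) << 12) | ((c) & 0xFFF))
#define CODE_FLAGS_TO_MBOX(c, f) ((((uint32_t)(c) & 0xFFFF) << 16) | ((f) & 0xFFFF))

int main(void)
{
	/* category 3 = VF, sub-code 9 = GPU_RESET_FAIL, flags 0x0001 */
	uint16_t code = ERROR_CODE(3, 9);

	assert(code == 0x3009);
	assert(CODE_FLAGS_TO_MBOX(code, 0x0001) == 0x30090001);
	return 0;
}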

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

@@ -43,6 +43,7 @@ struct amdgpu_virt_ops {
 	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*reset_gpu)(struct amdgpu_device *adev);
+	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
 };
 
 /* GPU virtualization */

+ 193 - 96
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -77,8 +77,6 @@ struct amdgpu_pte_update_params {
 	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
 		     uint64_t addr, unsigned count, uint32_t incr,
 		     uint64_t flags);
-	/* indicate update pt or its shadow */
-	bool shadow;
 	/* The next two are used during VM update by CPU
 	 *  DMA addresses to use for mapping
 	 *  Kernel pointer of PD/PT BO that needs to be updated
@@ -161,11 +159,17 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
  */
 static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
 				    int (*validate)(void *, struct amdgpu_bo *),
-				    void *param)
+				    void *param, bool use_cpu_for_update)
 {
 	unsigned i;
 	int r;
 
+	if (use_cpu_for_update) {
+		r = amdgpu_bo_kmap(parent->bo, NULL);
+		if (r)
+			return r;
+	}
+
 	if (!parent->entries)
 		return 0;
 
@@ -183,7 +187,8 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
 		 * Recurse into the sub directory. This is harmless because we
 		 * have only a maximum of 5 layers.
 		 */
-		r = amdgpu_vm_validate_level(entry, validate, param);
+		r = amdgpu_vm_validate_level(entry, validate, param,
+					     use_cpu_for_update);
 		if (r)
 			return r;
 	}
@@ -214,7 +219,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (num_evictions == vm->last_eviction_counter)
 		return 0;
 
-	return amdgpu_vm_validate_level(&vm->root, validate, param);
+	return amdgpu_vm_validate_level(&vm->root, validate, param,
+					vm->use_cpu_for_update);
 }
 
 /**
@@ -331,6 +337,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			if (r)
 				return r;
 
+			if (vm->use_cpu_for_update) {
+				r = amdgpu_bo_kmap(pt, NULL);
+				if (r) {
+					amdgpu_bo_unref(&pt);
+					return r;
+				}
+			}
+
 			/* Keep a reference to the root directory to avoid
 			* freeing them up in the wrong order.
 			*/
@@ -338,6 +352,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 
 			entry->bo = pt;
 			entry->addr = 0;
+			entry->huge_page = false;
 		}
 
 		if (level < adev->vm_manager.num_level) {
@@ -424,7 +439,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
 	struct dma_fence *updates = sync->last_vm_update;
 	int r = 0;
 	struct dma_fence *flushed, *tmp;
-	bool needs_flush = false;
+	bool needs_flush = vm->use_cpu_for_update;
 
 	flushed  = id->flushed_updates;
 	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
@@ -545,11 +560,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 	kfree(fences);
 
-	job->vm_needs_flush = false;
+	job->vm_needs_flush = vm->use_cpu_for_update;
 	/* Check if we can use a VMID already assigned to this VM */
 	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
 		struct dma_fence *flushed;
-		bool needs_flush = false;
+		bool needs_flush = vm->use_cpu_for_update;
 
 		/* Check all the prerequisites to using this VMID */
 		if (amdgpu_vm_had_gpu_reset(adev, id))
@@ -745,7 +760,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
  *
  * Emit a VM flush when it is necessary.
  */
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -767,12 +782,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		vm_flush_needed = true;
 	}
 
-	if (!vm_flush_needed && !gds_switch_needed)
+	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
 		return 0;
 
 	if (ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
+	if (need_pipe_sync)
+		amdgpu_ring_emit_pipeline_sync(ring);
+
 	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
 		struct dma_fence *fence;
 
@@ -981,6 +999,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 	unsigned int i;
 	uint64_t value;
 
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
 	for (i = 0; i < count; i++) {
 		value = params->pages_addr ?
 			amdgpu_vm_map_gart(params->pages_addr, addr) :
@@ -989,19 +1009,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 					i, value, flags);
 		addr += incr;
 	}
-
-	/* Flush HDP */
-	mb();
-	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
 }
 
-static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     void *owner)
 {
 	struct amdgpu_sync sync;
 	int r;
 
 	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+	amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
 	r = amdgpu_sync_wait(&sync, true);
 	amdgpu_sync_free(&sync);
 
@@ -1042,16 +1059,12 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 	params.adev = adev;
 	shadow = parent->bo->shadow;
 
-	WARN_ON(vm->use_cpu_for_update && shadow);
-	if (vm->use_cpu_for_update && !shadow) {
-		r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr);
-		if (r)
-			return r;
-		r = amdgpu_vm_bo_wait(adev, parent->bo);
-		if (unlikely(r)) {
-			amdgpu_bo_kunmap(parent->bo);
+	if (vm->use_cpu_for_update) {
+		pd_addr = (unsigned long)parent->bo->kptr;
+		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+		if (unlikely(r))
 			return r;
-		}
+
 		params.func = amdgpu_vm_cpu_set_ptes;
 	} else {
 		if (shadow) {
@@ -1105,7 +1118,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 
 		pt = amdgpu_bo_gpu_offset(bo);
 		pt = amdgpu_gart_get_vm_pde(adev, pt);
-		if (parent->entries[pt_idx].addr == pt)
+		if (parent->entries[pt_idx].addr == pt ||
+		    parent->entries[pt_idx].huge_page)
 			continue;
 
 		parent->entries[pt_idx].addr = pt;
@@ -1146,28 +1160,29 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 			    count, incr, AMDGPU_PTE_VALID);
 	}
 
-	if (params.func == amdgpu_vm_cpu_set_ptes)
-		amdgpu_bo_kunmap(parent->bo);
-	else if (params.ib->length_dw == 0) {
-		amdgpu_job_free(job);
-	} else {
-		amdgpu_ring_pad_ib(ring, params.ib);
-		amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
-		if (shadow)
-			amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+	if (!vm->use_cpu_for_update) {
+		if (params.ib->length_dw == 0) {
+			amdgpu_job_free(job);
+		} else {
+			amdgpu_ring_pad_ib(ring, params.ib);
+			amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
 					 AMDGPU_FENCE_OWNER_VM);
+			if (shadow)
+				amdgpu_sync_resv(adev, &job->sync,
+						 shadow->tbo.resv,
+						 AMDGPU_FENCE_OWNER_VM);
+
+			WARN_ON(params.ib->length_dw > ndw);
+			r = amdgpu_job_submit(job, ring, &vm->entity,
+					AMDGPU_FENCE_OWNER_VM, &fence);
+			if (r)
+				goto error_free;
 
-		WARN_ON(params.ib->length_dw > ndw);
-		r = amdgpu_job_submit(job, ring, &vm->entity,
-				AMDGPU_FENCE_OWNER_VM, &fence);
-		if (r)
-			goto error_free;
-
-		amdgpu_bo_fence(parent->bo, fence, true);
-		dma_fence_put(vm->last_dir_update);
-		vm->last_dir_update = dma_fence_get(fence);
-		dma_fence_put(fence);
+			amdgpu_bo_fence(parent->bo, fence, true);
+			dma_fence_put(vm->last_dir_update);
+			vm->last_dir_update = dma_fence_get(fence);
+			dma_fence_put(fence);
+		}
 	}
 	/*
 	 * Recurse into the subdirectories. This recursion is harmless because
@@ -1235,33 +1250,105 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 	if (r)
 		amdgpu_vm_invalidate_level(&vm->root);
 
+	if (vm->use_cpu_for_update) {
+		/* Flush HDP */
+		mb();
+		amdgpu_gart_flush_gpu_tlb(adev, 0);
+	}
+
 	return r;
 }
 
 /**
- * amdgpu_vm_find_pt - find the page table for an address
+ * amdgpu_vm_find_entry - find the entry for an address
  *
  * @p: see amdgpu_pte_update_params definition
  * @addr: virtual address in question
+ * @entry: resulting entry or NULL
+ * @parent: parent entry
  *
- * Find the page table BO for a virtual address, return NULL when none found.
+ * Find the vm_pt entry and its parent for the given address.
  */
-static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
-					  uint64_t addr)
+void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
+			 struct amdgpu_vm_pt **entry,
+			 struct amdgpu_vm_pt **parent)
 {
-	struct amdgpu_vm_pt *entry = &p->vm->root;
 	unsigned idx, level = p->adev->vm_manager.num_level;
 
-	while (entry->entries) {
+	*parent = NULL;
+	*entry = &p->vm->root;
+	while ((*entry)->entries) {
 		idx = addr >> (p->adev->vm_manager.block_size * level--);
-		idx %= amdgpu_bo_size(entry->bo) / 8;
-		entry = &entry->entries[idx];
+		idx %= amdgpu_bo_size((*entry)->bo) / 8;
+		*parent = *entry;
+		*entry = &(*entry)->entries[idx];
 	}
 
 	if (level)
-		return NULL;
+		*entry = NULL;
+}
+
+/**
+ * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
+ *
+ * @p: see amdgpu_pte_update_params definition
+ * @entry: vm_pt entry to check
+ * @parent: parent entry
+ * @nptes: number of PTEs updated with this operation
+ * @dst: destination address where the PTEs should point to
+ * @flags: access flags for the PTEs
+ *
+ * Check if we can update the PD with a huge page.
+ */
+static int amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+				       struct amdgpu_vm_pt *entry,
+				       struct amdgpu_vm_pt *parent,
+				       unsigned nptes, uint64_t dst,
+				       uint64_t flags)
+{
+	bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
+	uint64_t pd_addr, pde;
+	int r;
 
-	return entry->bo;
+	/* In the case of a mixed PT the PDE must point to it. */
+	if (p->adev->asic_type < CHIP_VEGA10 ||
+	    nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
+	    p->func == amdgpu_vm_do_copy_ptes ||
+	    !(flags & AMDGPU_PTE_VALID)) {
+
+		dst = amdgpu_bo_gpu_offset(entry->bo);
+		dst = amdgpu_gart_get_vm_pde(p->adev, dst);
+		flags = AMDGPU_PTE_VALID;
+	} else {
+		flags |= AMDGPU_PDE_PTE;
+	}
+
+	if (entry->addr == dst &&
+	    entry->huge_page == !!(flags & AMDGPU_PDE_PTE))
+		return 0;
+
+	entry->addr = dst;
+	entry->huge_page = !!(flags & AMDGPU_PDE_PTE);
+
+	if (use_cpu_update) {
+		r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr);
+		if (r)
+			return r;
+
+		pde = pd_addr + (entry - parent->entries) * 8;
+		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+	} else {
+		if (parent->bo->shadow) {
+			pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+			pde = pd_addr + (entry - parent->entries) * 8;
+			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+		}
+		pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+		pde = pd_addr + (entry - parent->entries) * 8;
+		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+	}
+
+	return 0;
 }
 
 /**
@@ -1287,49 +1374,47 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	uint64_t addr, pe_start;
 	struct amdgpu_bo *pt;
 	unsigned nptes;
-	int r;
 	bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
-
+	int r;
 
 	/* walk over the address space and update the page tables */
-	for (addr = start; addr < end; addr += nptes) {
-		pt = amdgpu_vm_get_pt(params, addr);
-		if (!pt) {
-			pr_err("PT not found, aborting update_ptes\n");
-			return -EINVAL;
-		}
+	for (addr = start; addr < end; addr += nptes,
+	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
+		struct amdgpu_vm_pt *entry, *parent;
 
-		if (params->shadow) {
-			if (WARN_ONCE(use_cpu_update,
-				"CPU VM update doesn't suuport shadow pages"))
-				return 0;
-
-			if (!pt->shadow)
-				return 0;
-			pt = pt->shadow;
-		}
+		amdgpu_vm_get_entry(params, addr, &entry, &parent);
+		if (!entry)
+			return -ENOENT;
 
 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
 		else
 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
 
+		r = amdgpu_vm_handle_huge_pages(params, entry, parent,
+						nptes, dst, flags);
+		if (r)
+			return r;
+
+		if (entry->huge_page)
+			continue;
+
+		pt = entry->bo;
 		if (use_cpu_update) {
-			r = amdgpu_bo_kmap(pt, (void *)&pe_start);
-			if (r)
-				return r;
-		} else
+			pe_start = (unsigned long)pt->kptr;
+		} else {
+			if (pt->shadow) {
+				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
+				pe_start += (addr & mask) * 8;
+				params->func(params, pe_start, dst, nptes,
+					     AMDGPU_GPU_PAGE_SIZE, flags);
+			}
 			pe_start = amdgpu_bo_gpu_offset(pt);
+		}
 
 		pe_start += (addr & mask) * 8;
-
 		params->func(params, pe_start, dst, nptes,
 			     AMDGPU_GPU_PAGE_SIZE, flags);
-
-		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-
-		if (use_cpu_update)
-			amdgpu_bo_kunmap(pt);
 	}
 
 	return 0;
@@ -1372,8 +1457,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
 	 */
 
 	/* SI and newer are optimized for 64KB */
-	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
-	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+	unsigned pages_per_frag = AMDGPU_LOG2_PAGES_PER_FRAG(params->adev);
+	uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
+	uint64_t frag_align = 1 << pages_per_frag;
 
 	uint64_t frag_start = ALIGN(start, frag_align);
 	uint64_t frag_end = end & ~(frag_align - 1);
@@ -1445,6 +1531,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.src = src;
 
+	/* sync to everything on unmapping */
+	if (!(flags & AMDGPU_PTE_VALID))
+		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
 	if (vm->use_cpu_for_update) {
 		/* params.src is used as flag to indicate system Memory */
 		if (pages_addr)
@@ -1453,23 +1543,18 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		/* Wait for PT BOs to be free. PTs share the same resv. object
 		 * as the root PD BO
 		 */
-		r = amdgpu_vm_bo_wait(adev, vm->root.bo);
+		r = amdgpu_vm_wait_pd(adev, vm, owner);
 		if (unlikely(r))
 			return r;
 
 		params.func = amdgpu_vm_cpu_set_ptes;
 		params.pages_addr = pages_addr;
-		params.shadow = false;
 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
 					   addr, flags);
 	}
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
-	/* sync to everything on unmapping */
-	if (!(flags & AMDGPU_PTE_VALID))
-		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
-
 	nptes = last - start + 1;
 
 	/*
@@ -1481,6 +1566,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
+	/* one PDE write for each huge page */
+	ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
+
 	if (src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
@@ -1542,11 +1630,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	params.shadow = true;
-	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
-	if (r)
-		goto error_free;
-	params.shadow = false;
 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
 	if (r)
 		goto error_free;
@@ -1565,6 +1648,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
 	amdgpu_job_free(job);
+	amdgpu_vm_invalidate_level(&vm->root);
 	return r;
 }
 
@@ -1752,6 +1836,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
+	if (vm->use_cpu_for_update) {
+		/* Flush HDP */
+		mb();
+		amdgpu_gart_flush_gpu_tlb(adev, 0);
+	}
+
 	return 0;
 }
 
@@ -2457,6 +2547,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		goto error_free_root;
 
 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+
+	if (vm->use_cpu_for_update) {
+		r = amdgpu_bo_kmap(vm->root.bo, NULL);
+		if (r)
+			goto error_free_root;
+	}
+
 	amdgpu_bo_unreserve(vm->root.bo);
 
 	return 0;
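amdgpu_vm_get_entry() above replaces the old page-table BO lookup with a walk that also reports the parent directory, which the huge-page path needs in order to rewrite the PDE in place. A hedged sketch of that walk, with block_size/num_level standing in for the adev->vm_manager fields and a simplified node type:

#include <stddef.h>
#include <stdint.h>

struct vm_pt {
	struct vm_pt *entries;	/* NULL for a leaf page table */
	unsigned num_entries;
};

/* Each level consumes block_size bits of the address, top level first;
 * *parent is left pointing at the directory holding the returned entry. */
static struct vm_pt *walk(struct vm_pt *root, uint64_t addr,
			  unsigned block_size, unsigned num_level,
			  struct vm_pt **parent)
{
	struct vm_pt *entry = root;
	unsigned level = num_level;

	*parent = NULL;
	while (entry->entries) {
		unsigned idx = (addr >> (block_size * level--)) %
			       entry->num_entries;

		*parent = entry;
		entry = &entry->entries[idx];
	}
	return level ? NULL : entry;	/* NULL: tree ended above PT level */
}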

+ 8 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -51,7 +51,9 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_PTB_ALIGN_SIZE   32768
 
 /* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_LOG2_PAGES_PER_FRAG(adev) \
+	((adev)->asic_type < CHIP_VEGA10 ? 4 : \
+	 (adev)->vm_manager.block_size)
 
 #define AMDGPU_PTE_VALID	(1ULL << 0)
 #define AMDGPU_PTE_SYSTEM	(1ULL << 1)
@@ -68,6 +70,9 @@ struct amdgpu_bo_list_entry;
 /* TILED for VEGA10, reserved for older ASICs  */
 #define AMDGPU_PTE_PRT		(1ULL << 51)
 
+/* PDE is handled as PTE for VEGA10 */
+#define AMDGPU_PDE_PTE		(1ULL << 54)
+
 /* VEGA10 only */
 #define AMDGPU_PTE_MTYPE(a)    ((uint64_t)a << 57)
 #define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)
@@ -98,6 +103,7 @@ struct amdgpu_bo_list_entry;
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;
 	uint64_t		addr;
+	bool			huge_page;
 
 	/* array of page tables, one for each directory entry */
 	struct amdgpu_vm_pt	*entries;
@@ -222,7 +228,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct dma_fence *fence,
 		      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 			unsigned vmid);
 void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
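Making the fragment size per-device means frag_align now tracks the VM block size on Vega10, so a fully fragmented mapping lines up exactly with one PDE's worth of PTEs (the 2MB huge-page case). Worked numbers, assuming 4KB GPU pages and a block_size of 9 (both are assumptions here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned block_size = 9;	/* assumed Vega10 value */
	unsigned log2_frag_pre = 4;	/* pre-Vega10 constant */
	unsigned log2_frag_vega = block_size;

	/* 1 << 4 = 16 pages  -> 16 * 4KB  = 64KB fragments */
	printf("%llu KiB\n", (1ull << log2_frag_pre) * 4096 / 1024);
	/* 1 << 9 = 512 pages -> 512 * 4KB = 2MB fragments */
	printf("%llu KiB\n", (1ull << log2_frag_vega) * 4096 / 1024);
	return 0;
}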

+ 1 - 8
drivers/gpu/drm/amd/amdgpu/cik.c

@@ -1824,21 +1824,14 @@ static int cik_common_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_amdkfd_suspend(adev);
-
 	return cik_common_hw_fini(adev);
 }
 
 static int cik_common_resume(void *handle)
 {
-	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = cik_common_hw_init(adev);
-	if (r)
-		return r;
-
-	return amdgpu_amdkfd_resume(adev);
+	return cik_common_hw_init(adev);
 }
 
 static bool cik_common_is_idle(void *handle)

+ 60 - 0
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -341,6 +341,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
 	/* XXX todo */
 }
 
+/**
+ * cik_ctx_switch_enable - enable/disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (CIK).
+ */
+static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+	u32 f32_cntl, phase_quantum = 0;
+	int i;
+
+	if (amdgpu_sdma_phase_quantum) {
+		unsigned value = amdgpu_sdma_phase_quantum;
+		unsigned unit = 0;
+
+		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+			value = (value + 1) >> 1;
+			unit++;
+		}
+		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+			WARN_ONCE(1,
+			"clamping sdma_phase_quantum to %uK clock cycles\n",
+				  value << unit);
+		}
+		phase_quantum =
+			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+	}
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+		if (enable) {
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					AUTO_CTXSW_ENABLE, 1);
+			if (amdgpu_sdma_phase_quantum) {
+				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+				       phase_quantum);
+				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+				       phase_quantum);
+			}
+		} else {
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					AUTO_CTXSW_ENABLE, 0);
+		}
+
+		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+	}
+}
+
 /**
  * cik_sdma_enable - stop the async dma engines
  *
@@ -537,6 +594,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
 
 	/* halt the engine before programming */
 	cik_sdma_enable(adev, false);
+	/* enable sdma ring preemption */
+	cik_ctx_switch_enable(adev, true);
 
 	/* start the gfx rings and rlc compute queues */
 	r = cik_sdma_gfx_resume(adev);
@@ -984,6 +1043,7 @@ static int cik_sdma_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	cik_ctx_switch_enable(adev, false);
 	cik_sdma_enable(adev, false);
 
 	return 0;
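The phase-quantum loop above is a float-like encoding: it halves the requested cycle count (rounding up) until it fits the register's VALUE field, counts the halvings in UNIT, and saturates both on overflow. A standalone sketch; the 13-bit/4-bit field widths and the packed layout are illustration-only assumptions, the driver derives them from the SDMA0_PHASE0_QUANTUM masks:

#include <stdint.h>

#define VALUE_MAX ((1u << 13) - 1)	/* assumed VALUE field width */
#define UNIT_MAX  ((1u << 4) - 1)	/* assumed UNIT field width */

static uint32_t encode_quantum(unsigned value)
{
	unsigned unit = 0;

	while (value > VALUE_MAX) {
		value = (value + 1) >> 1;	/* halve, rounding up */
		unit++;
	}
	if (unit > UNIT_MAX) {			/* clamp: quantum too large */
		value = VALUE_MAX;
		unit = UNIT_MAX;
	}
	return (value << 4) | unit;		/* quantum ~= value << unit */
}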

+ 21 - 20
drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h

@@ -1,24 +1,25 @@
-
 /*
-***************************************************************************************************
-*
-*  Trade secret of Advanced Micro Devices, Inc.
-*  Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
-*
-*  All rights reserved.  This notice is intended as a precaution against inadvertent publication and
-*  does not imply publication or any waiver of confidentiality.  The year included in the foregoing
-*  notice is the year of creation of the work.
-*
-***************************************************************************************************
-*/
-/**
-***************************************************************************************************
-* @brief gfx9 Clearstate Definitions
-***************************************************************************************************
-*
-*   Do not edit! This is a machine-generated file!
-*
-*/
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 
 static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
 {

+ 2 - 131
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
 	return true;
 }
 
-static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
-				     struct amdgpu_mode_mc_save *save)
-{
-	u32 crtc_enabled, tmp;
-	int i;
-
-	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
-	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
-	/* disable VGA render */
-	tmp = RREG32(mmVGA_RENDER_CONTROL);
-	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-	WREG32(mmVGA_RENDER_CONTROL, tmp);
-
-	/* blank the display controllers */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
-					     CRTC_CONTROL, CRTC_MASTER_EN);
-		if (crtc_enabled) {
-#if 0
-			u32 frame_count;
-			int j;
-
-			save->crtc_enabled[i] = true;
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-				amdgpu_display_vblank_wait(adev, i);
-				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
-				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			}
-			/* wait for the next frame */
-			frame_count = amdgpu_display_vblank_get_counter(adev, i);
-			for (j = 0; j < adev->usec_timeout; j++) {
-				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-					break;
-				udelay(1);
-			}
-			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
-				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
-				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
-				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-			}
-#else
-			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
-			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
-			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			save->crtc_enabled[i] = false;
-			/* ***** */
-#endif
-		} else {
-			save->crtc_enabled[i] = false;
-		}
-	}
-}
-
-static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
-				       struct amdgpu_mode_mc_save *save)
-{
-	u32 tmp, frame_count;
-	int i, j;
-
-	/* update crtc base addresses */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
-		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
-
-		if (save->crtc_enabled[i]) {
-			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
-				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
-				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
-				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
-				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
-				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-			}
-			for (j = 0; j < adev->usec_timeout; j++) {
-				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
-					break;
-				udelay(1);
-			}
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			/* wait for the next frame */
-			frame_count = amdgpu_display_vblank_get_counter(adev, i);
-			for (j = 0; j < adev->usec_timeout; j++) {
-				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-					break;
-				udelay(1);
-			}
-		}
-	}
-
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
-	/* Unlock vga access */
-	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
-	mdelay(1);
-	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
 static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
 					   bool render)
 {
@@ -3025,6 +2897,8 @@ static int dce_v10_0_hw_init(void *handle)
 
 	dce_v10_0_init_golden_registers(adev);
 
+	/* disable vga render */
+	dce_v10_0_set_vga_render_state(adev, false);
 	/* init dig PHYs, disp eng pll */
 	amdgpu_atombios_encoder_init_dig(adev);
 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3737,7 +3611,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
-	.set_vga_render_state = &dce_v10_0_set_vga_render_state,
 	.bandwidth_update = &dce_v10_0_bandwidth_update,
 	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
 	.vblank_wait = &dce_v10_0_vblank_wait,
@@ -3750,8 +3623,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
 	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
 	.add_encoder = &dce_v10_0_encoder_add,
 	.add_connector = &amdgpu_connector_add,
-	.stop_mc_access = &dce_v10_0_stop_mc_access,
-	.resume_mc_access = &dce_v10_0_resume_mc_access,
 };
 
 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)

+ 2 - 76
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
 	return true;
 }
 
-static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
-				     struct amdgpu_mode_mc_save *save)
-{
-	u32 crtc_enabled, tmp;
-	int i;
-
-	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
-	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
-	/* disable VGA render */
-	tmp = RREG32(mmVGA_RENDER_CONTROL);
-	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-	WREG32(mmVGA_RENDER_CONTROL, tmp);
-
-	/* blank the display controllers */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
-					     CRTC_CONTROL, CRTC_MASTER_EN);
-		if (crtc_enabled) {
-#if 1
-			save->crtc_enabled[i] = true;
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-				/*it is correct only for RGB ; black is 0*/
-				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
-				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
-				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-			}
-#else
-			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
-			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
-			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			save->crtc_enabled[i] = false;
-			/* ***** */
-#endif
-		} else {
-			save->crtc_enabled[i] = false;
-		}
-	}
-}
-
-static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
-				       struct amdgpu_mode_mc_save *save)
-{
-	u32 tmp;
-	int i;
-
-	/* update crtc base addresses */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
-
-		if (save->crtc_enabled[i]) {
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-		}
-	}
-
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
-	/* Unlock vga access */
-	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
-	mdelay(1);
-	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
 					   bool render)
 {
@@ -3086,6 +3013,8 @@ static int dce_v11_0_hw_init(void *handle)
 
 	dce_v11_0_init_golden_registers(adev);
 
+	/* disable vga render */
+	dce_v11_0_set_vga_render_state(adev, false);
 	/* init dig PHYs, disp eng pll */
 	amdgpu_atombios_crtc_powergate_init(adev);
 	amdgpu_atombios_encoder_init_dig(adev);
@@ -3806,7 +3735,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
-	.set_vga_render_state = &dce_v11_0_set_vga_render_state,
 	.bandwidth_update = &dce_v11_0_bandwidth_update,
 	.vblank_get_counter = &dce_v11_0_vblank_get_counter,
 	.vblank_wait = &dce_v11_0_vblank_wait,
@@ -3819,8 +3747,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
 	.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
 	.add_encoder = &dce_v11_0_encoder_add,
 	.add_connector = &amdgpu_connector_add,
-	.stop_mc_access = &dce_v11_0_stop_mc_access,
-	.resume_mc_access = &dce_v11_0_resume_mc_access,
 };
 
 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)

+ 2 - 114
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -392,117 +392,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 	return mmDC_GPIO_HPD_A;
 }
 
-static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
-{
-	if (crtc >= adev->mode_info.num_crtc)
-		return 0;
-	else
-		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
-				    struct amdgpu_mode_mc_save *save)
-{
-	u32 crtc_enabled, tmp, frame_count;
-	int i, j;
-
-	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
-	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
-	/* disable VGA render */
-	WREG32(mmVGA_RENDER_CONTROL, 0);
-
-	/* blank the display controllers */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
-		if (crtc_enabled) {
-			save->crtc_enabled[i] = true;
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-
-			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
-				dce_v6_0_vblank_wait(adev, i);
-				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
-				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			}
-			/* wait for the next frame */
-			frame_count = evergreen_get_vblank_counter(adev, i);
-			for (j = 0; j < adev->usec_timeout; j++) {
-				if (evergreen_get_vblank_counter(adev, i) != frame_count)
-					break;
-				udelay(1);
-			}
-
-			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
-			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
-			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			save->crtc_enabled[i] = false;
-			/* ***** */
-		} else {
-			save->crtc_enabled[i] = false;
-		}
-	}
-}
-
-static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
-				      struct amdgpu_mode_mc_save *save)
-{
-	u32 tmp;
-	int i, j;
-
-	/* update crtc base addresses */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
-		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
-	}
-
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
-
-	/* unlock regs and wait for update */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		if (save->crtc_enabled[i]) {
-			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-			if ((tmp & 0x7) != 0) {
-				tmp &= ~0x7;
-				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
-				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
-				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-			}
-			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-			if (tmp & 1) {
-				tmp &= ~1;
-				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-			}
-			for (j = 0; j < adev->usec_timeout; j++) {
-				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
-					break;
-				udelay(1);
-			}
-		}
-	}
-
-	/* Unlock vga access */
-	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
-	mdelay(1);
-	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-
-}
-
 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 					  bool render)
 {
@@ -2873,6 +2762,8 @@ static int dce_v6_0_hw_init(void *handle)
 	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* disable vga render */
+	dce_v6_0_set_vga_render_state(adev, false);
 	/* init dig PHYs, disp eng pll */
 	amdgpu_atombios_encoder_init_dig(adev);
 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3525,7 +3416,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
-	.set_vga_render_state = &dce_v6_0_set_vga_render_state,
 	.bandwidth_update = &dce_v6_0_bandwidth_update,
 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
 	.vblank_wait = &dce_v6_0_vblank_wait,
@@ -3538,8 +3428,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
 	.add_encoder = &dce_v6_0_encoder_add,
 	.add_connector = &amdgpu_connector_add,
-	.stop_mc_access = &dce_v6_0_stop_mc_access,
-	.resume_mc_access = &dce_v6_0_resume_mc_access,
 };
 
 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)

+ 2 - 78
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
 	return true;
 }
 
-static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
-				    struct amdgpu_mode_mc_save *save)
-{
-	u32 crtc_enabled, tmp;
-	int i;
-
-	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
-	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
-	/* disable VGA render */
-	tmp = RREG32(mmVGA_RENDER_CONTROL);
-	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-	WREG32(mmVGA_RENDER_CONTROL, tmp);
-
-	/* blank the display controllers */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
-					     CRTC_CONTROL, CRTC_MASTER_EN);
-		if (crtc_enabled) {
-#if 1
-			save->crtc_enabled[i] = true;
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-				/*it is correct only for RGB ; black is 0*/
-				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
-				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
-				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-			}
-			mdelay(20);
-#else
-			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
-			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
-			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
-			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-			save->crtc_enabled[i] = false;
-			/* ***** */
-#endif
-		} else {
-			save->crtc_enabled[i] = false;
-		}
-	}
-}
-
-static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
-				      struct amdgpu_mode_mc_save *save)
-{
-	u32 tmp;
-	int i;
-
-	/* update crtc base addresses */
-	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-		       upper_32_bits(adev->mc.vram_start));
-		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
-		       (u32)adev->mc.vram_start);
-
-		if (save->crtc_enabled[i]) {
-			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-		}
-		mdelay(20);
-	}
-
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
-	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
-	/* Unlock vga access */
-	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
-	mdelay(1);
-	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
 static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
 					  bool render)
 {
@@ -2870,6 +2795,8 @@ static int dce_v8_0_hw_init(void *handle)
 	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* disable vga render */
+	dce_v8_0_set_vga_render_state(adev, false);
 	/* init dig PHYs, disp eng pll */
 	amdgpu_atombios_encoder_init_dig(adev);
 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3574,7 +3501,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
-	.set_vga_render_state = &dce_v8_0_set_vga_render_state,
 	.bandwidth_update = &dce_v8_0_bandwidth_update,
 	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
 	.vblank_wait = &dce_v8_0_vblank_wait,
@@ -3587,8 +3513,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
 	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
 	.add_encoder = &dce_v8_0_encoder_add,
 	.add_connector = &amdgpu_connector_add,
-	.stop_mc_access = &dce_v8_0_stop_mc_access,
-	.resume_mc_access = &dce_v8_0_resume_mc_access,
 };
 
 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)

+ 40 - 60
drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -95,62 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
-			      struct amdgpu_mode_mc_save *save)
-{
-	switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_SI
-	case CHIP_TAHITI:
-	case CHIP_PITCAIRN:
-	case CHIP_VERDE:
-	case CHIP_OLAND:
-		dce_v6_0_disable_dce(adev);
-		break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
-	case CHIP_KAVERI:
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-		dce_v8_0_disable_dce(adev);
-		break;
-#endif
-	case CHIP_FIJI:
-	case CHIP_TONGA:
-		dce_v10_0_disable_dce(adev);
-		break;
-	case CHIP_CARRIZO:
-	case CHIP_STONEY:
-	case CHIP_POLARIS10:
-	case CHIP_POLARIS11:
-	case CHIP_POLARIS12:
-		dce_v11_0_disable_dce(adev);
-		break;
-	case CHIP_TOPAZ:
-#ifdef CONFIG_DRM_AMDGPU_SI
-	case CHIP_HAINAN:
-#endif
-		/* no DCE */
-		return;
-	default:
-		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
-	}
-
-	return;
-}
-static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
-				struct amdgpu_mode_mc_save *save)
-{
-	return;
-}
-
-static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
-				    bool render)
-{
-	return;
-}
-
 /**
  * dce_virtual_bandwidth_update - program display watermarks
  *
@@ -522,6 +466,45 @@ static int dce_virtual_sw_fini(void *handle)
 
 static int dce_virtual_hw_init(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+		dce_v6_0_disable_dce(adev);
+		break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		dce_v8_0_disable_dce(adev);
+		break;
+#endif
+	case CHIP_FIJI:
+	case CHIP_TONGA:
+		dce_v10_0_disable_dce(adev);
+		break;
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+		dce_v11_0_disable_dce(adev);
+		break;
+	case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
+		/* no DCE */
+		break;
+	default:
+		DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+	}
 	return 0;
 }
 
@@ -677,7 +660,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
-	.set_vga_render_state = &dce_virtual_set_vga_render_state,
 	.bandwidth_update = &dce_virtual_bandwidth_update,
 	.vblank_get_counter = &dce_virtual_vblank_get_counter,
 	.vblank_wait = &dce_virtual_vblank_wait,
@@ -690,8 +672,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
 	.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
 	.add_encoder = NULL,
 	.add_connector = NULL,
-	.stop_mc_access = &dce_virtual_stop_mc_access,
-	.resume_mc_access = &dce_virtual_resume_mc_access,
 };
 
 static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
@@ -809,7 +789,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
 
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
 	adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
 }
 

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c

@@ -1573,7 +1573,7 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
 
 static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
 {
-	adev->gfx.scratch.num_reg = 7;
+	adev->gfx.scratch.num_reg = 8;
 	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
 }
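The same one-line fix lands in all four gfx generations below: the hardware has eight scratch registers (SCRATCH_REG0..SCRATCH_REG7), and because the allocation mask is derived from the count, num_reg = 7 silently made the last register unallocatable. The mask arithmetic:

#include <assert.h>

int main(void)
{
	/* free_mask carries one bit per scratch register */
	assert(((1u << 7) - 1) == 0x7f);	/* old: SCRATCH_REG7 missing */
	assert(((1u << 8) - 1) == 0xff);	/* new: all eight available */
	return 0;
}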

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2021,7 +2021,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
  */
 static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
 {
-	adev->gfx.scratch.num_reg = 7;
+	adev->gfx.scratch.num_reg = 8;
 	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
 }

+ 16 - 16
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -193,8 +193,8 @@ static const u32 tonga_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 };
 
 static const u32 tonga_mgcg_cgcg_init[] =
@@ -303,8 +303,8 @@ static const u32 polaris11_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 };
 
 static const u32 golden_settings_polaris10_a11[] =
@@ -336,8 +336,8 @@ static const u32 polaris10_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 };
 
 static const u32 fiji_golden_common_all[] =
@@ -348,8 +348,8 @@ static const u32 fiji_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
 	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
 };
@@ -436,8 +436,8 @@ static const u32 iceland_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 };
 
 static const u32 iceland_mgcg_cgcg_init[] =
@@ -532,8 +532,8 @@ static const u32 cz_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
 };
 
 static const u32 cz_mgcg_cgcg_init[] =
@@ -637,8 +637,8 @@ static const u32 stoney_golden_common_all[] =
 	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
 	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
 	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
-	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
-	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
 };
 
 static const u32 stoney_mgcg_cgcg_init[] =
@@ -750,7 +750,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 
 static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
 {
-	adev->gfx.scratch.num_reg = 7;
+	adev->gfx.scratch.num_reg = 8;
 	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
 	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
 }
@@ -4564,7 +4564,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		/* This situation may be hit in the future if a new HW
 		 * generation exposes more than 64 queues. If so, the
 		 * definition of queue_mask needs updating */
-		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
 			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
 			break;
 		}
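The KCQ check above had a classic off-by-one: queue_mask is 64 bits wide, so valid bit indices are 0..63, but "i > 64" still admitted i == 64, and shifting a 64-bit value by 64 is undefined behavior. A minimal demonstration of the corrected guard:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t queue_mask = 0;

	for (unsigned i = 62; i <= 64; i++) {
		if (i >= sizeof(queue_mask) * 8) {	/* was: i > 64 */
			fprintf(stderr, "invalid KCQ: %u\n", i);
			continue;			/* bit 64 does not exist */
		}
		queue_mask |= 1ull << i;
	}
	printf("mask: 0x%016llx\n", (unsigned long long)queue_mask);
	return 0;
}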

+ 15 - 13
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@@ -211,7 +211,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 
 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
 {
-	adev->gfx.scratch.num_reg = 7;
+	adev->gfx.scratch.num_reg = 8;
 	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
 	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
 }
@@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
 
 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
 {
-	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+	u32 data;
 
-	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (se_num == 0xffffffff) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+	if (instance == 0xffffffff)
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+	else
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+	if (se_num == 0xffffffff)
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (sh_num == 0xffffffff) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+	else
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	} else {
+
+	if (sh_num == 0xffffffff)
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+	else
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	}
+
 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 }
 
@@ -2425,7 +2427,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 		/* This situation may be hit in the future if a new HW
 		 * generation exposes more than 64 queues. If so, the
 		 * definition of queue_mask needs updating */
-		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
 			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
 			break;
 		}
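The rewritten gfx_v9_0_select_se_sh() treats the three selectors orthogonally: for each of instance, SE and SH, 0xffffffff means broadcast and anything else indexes one unit, replacing the old nested special cases. A generic sketch of that per-field pattern; the bit positions below are illustrative, the driver uses the GRBM_GFX_INDEX field macros:

#include <stdint.h>

#define BCAST 0xffffffffu

/* one selector: either set the broadcast bit or write the index field */
static uint32_t apply_sel(uint32_t data, uint32_t sel,
			  uint32_t bcast_bit, unsigned idx_shift)
{
	return sel == BCAST ? (data | bcast_bit)
			    : (data | (sel << idx_shift));
}

static uint32_t grbm_gfx_index(uint32_t instance, uint32_t se, uint32_t sh)
{
	uint32_t data = 0;

	data = apply_sel(data, instance, 1u << 30, 0);	/* illustrative bits */
	data = apply_sel(data, se,       1u << 31, 16);
	data = apply_sel(data, sh,       1u << 29, 8);
	return data;
}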

+ 10 - 5
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c

@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	gfxhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.gart_start >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.gart_start >> 44));
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.gart_end >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.gart_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -129,7 +129,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	/* Setup L2 cache */
 	tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 	/* XXX for emulation, Refer to closed source code.*/
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
 			    0);
@@ -144,6 +144,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
 
 	tmp = mmVM_L2_CNTL3_DEFAULT;
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
 	WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
 
 	tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -206,6 +208,9 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 				PAGE_TABLE_BLOCK_SIZE,
 				adev->vm_manager.block_size - 9);
+		/* Send no-retry XNACK on fault to suppress VM fault storm. */
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
 		WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
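
Three related changes land in this hub (and again in mmhub_v1_0 below): the gtt_start/gtt_end fields become gart_start/gart_end, L2 fragment processing is switched on, and VM_L2_CNTL3 programs a big-page fragment size of 9. A fragment size f covers 2^f contiguous 4 KiB pages, so 9 corresponds to the 2 MiB granularity used by the new huge-page path. The added RETRY_PERMISSION_OR_INVALID_PAGE_FAULT = 0 makes faults report no-retry XNACK, so a bad mapping raises one fault instead of a retry storm. A quick check of the fragment math:

#include <stdio.h>

int main(void)
{
	unsigned f = 9;                               /* L2_CACHE_BIGK_FRAGMENT_SIZE */
	unsigned long kib = ((1UL << f) * 4096UL) >> 10;

	printf("fragment size %u -> %lu KiB\n", f, kib); /* 2048 KiB = 2 MiB */
	return 0;
}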

+ 26 - 45
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] =
 	SI_CRTC5_REGISTER_OFFSET
 };
 
-static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
-			     struct amdgpu_mode_mc_save *save)
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
 {
 	u32 blackout;
 
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_stop_mc_access(adev, save);
-
 	gmc_v6_0_wait_for_idle((void *)adev);
 
 	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
 
 }
 
-static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
-			       struct amdgpu_mode_mc_save *save)
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 	WREG32(mmBIF_FB_EN, tmp);
-
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_resume_mc_access(adev, save);
-
 }
 
 static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
@@ -228,20 +219,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
+	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+	base <<= 24;
+
 	if (mc->mc_vram_size > 0xFFC0000000ULL) {
 		dev_warn(adev->dev, "limiting VRAM\n");
 		mc->real_vram_size = 0xFFC0000000ULL;
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
-	amdgpu_vram_location(adev, &adev->mc, 0);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_vram_location(adev, &adev->mc, base);
+	amdgpu_gart_location(adev, mc);
 }
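
Matching the "stop reprogramming the MC" item from the merge summary, the VRAM base is now read back from MC_VM_FB_LOCATION (programmed by the vbios during asic_init) instead of being forced to 0 and rewritten later. The low 16 bits hold the base in 16 MiB units, hence the shift by 24:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mc_vm_fb_location = 0x00F0;  /* hypothetical readback value */
	uint64_t base = (uint64_t)(mc_vm_fb_location & 0xFFFF) << 24;

	printf("fb base = 0x%llx\n", (unsigned long long)base); /* 0xf0000000 */
	return 0;
}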
 
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 {
-	struct amdgpu_mode_mc_save save;
-	u32 tmp;
 	int i, j;
 
 	/* Initialize HDP */
@@ -254,16 +245,23 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_set_vga_render_state(adev, false);
-
-	gmc_v6_0_mc_stop(adev, &save);
-
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
 
-	WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
+	if (adev->mode_info.num_crtc) {
+		u32 tmp;
+
+		/* Lockout access through VGA aperture*/
+		tmp = RREG32(mmVGA_HDP_CONTROL);
+		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
+		WREG32(mmVGA_HDP_CONTROL, tmp);
+
+		/* disable VGA render */
+		tmp = RREG32(mmVGA_RENDER_CONTROL);
+		tmp &= ~VGA_VSTATUS_CNTL;
+		WREG32(mmVGA_RENDER_CONTROL, tmp);
+	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 	       adev->mc.vram_start >> 12);
@@ -271,13 +269,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	       adev->mc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
-	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
-	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
-	WREG32(mmMC_VM_FB_LOCATION, tmp);
-	/* XXX double check these! */
-	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
-	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
-	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	WREG32(mmMC_VM_AGP_BASE, 0);
 	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -285,7 +276,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
-	gmc_v6_0_mc_resume(adev, &save);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -342,15 +332,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
 
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+	amdgpu_gart_set_defaults(adev);
 	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
 
 	return 0;
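
The per-ASIC default ("1024 MB or VRAM size, whichever is larger") is replaced by a single amdgpu_gart_set_defaults() call; per the merge summary this is where the internal GART default drops to 256 MB, without shrinking the ttm GTT pool. A hedged sketch of what the centralised helper plausibly does -- the real body lives in amdgpu_gart.c and may differ:

#include <stdint.h>

struct mc_info { uint64_t gart_size; };           /* stand-in for amdgpu_mc */

static void gart_set_defaults(struct mc_info *mc, long long gart_size_mb)
{
	if (gart_size_mb == -1)
		mc->gart_size = 256ULL << 20;               /* assumed 256 MiB default */
	else
		mc->gart_size = (uint64_t)gart_size_mb << 20; /* module-param override */
}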
@@ -511,8 +493,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -559,7 +541,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -987,7 +969,6 @@ static int gmc_v6_0_wait_for_idle(void *handle)
 static int gmc_v6_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_mode_mc_save save;
 	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS);
 
@@ -1003,7 +984,7 @@ static int gmc_v6_0_soft_reset(void *handle)
 	}
 
 	if (srbm_soft_reset) {
-		gmc_v6_0_mc_stop(adev, &save);
+		gmc_v6_0_mc_stop(adev);
 		if (gmc_v6_0_wait_for_idle(adev)) {
 			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
 		}
@@ -1023,7 +1004,7 @@ static int gmc_v6_0_soft_reset(void *handle)
 
 		udelay(50);
 
-		gmc_v6_0_mc_resume(adev, &save);
+		gmc_v6_0_mc_resume(adev);
 		udelay(50);
 	}
 

+ 27 - 41
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@@ -37,6 +37,9 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
 #include "amdgpu_atombios.h"
 
 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
@@ -76,14 +79,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
 	}
 }
 
-static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
-			     struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
 {
 	u32 blackout;
 
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_stop_mc_access(adev, save);
-
 	gmc_v7_0_wait_for_idle((void *)adev);
 
 	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -99,8 +98,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
 	udelay(100);
 }
 
-static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
-			       struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -112,9 +110,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 	WREG32(mmBIF_FB_EN, tmp);
-
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_resume_mc_access(adev, save);
 }
 
 /**
@@ -242,15 +237,17 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
+	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+	base <<= 24;
+
 	if (mc->mc_vram_size > 0xFFC0000000ULL) {
 		/* leave room for at least 1024M GTT */
 		dev_warn(adev->dev, "limiting VRAM\n");
 		mc->real_vram_size = 0xFFC0000000ULL;
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
-	amdgpu_vram_location(adev, &adev->mc, 0);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_vram_location(adev, &adev->mc, base);
+	amdgpu_gart_location(adev, mc);
 }
 
 /**
@@ -263,7 +260,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 {
-	struct amdgpu_mode_mc_save save;
 	u32 tmp;
 	int i, j;
 
@@ -277,13 +273,20 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_set_vga_render_state(adev, false);
-
-	gmc_v7_0_mc_stop(adev, &save);
 	if (gmc_v7_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
+	if (adev->mode_info.num_crtc) {
+		/* Lockout access through VGA aperture*/
+		tmp = RREG32(mmVGA_HDP_CONTROL);
+		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+		WREG32(mmVGA_HDP_CONTROL, tmp);
+
+		/* disable VGA render */
+		tmp = RREG32(mmVGA_RENDER_CONTROL);
+		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+		WREG32(mmVGA_RENDER_CONTROL, tmp);
+	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 	       adev->mc.vram_start >> 12);
@@ -291,20 +294,12 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 	       adev->mc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
-	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
-	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
-	WREG32(mmMC_VM_FB_LOCATION, tmp);
-	/* XXX double check these! */
-	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
-	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
-	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	WREG32(mmMC_VM_AGP_BASE, 0);
 	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 	if (gmc_v7_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
-	gmc_v7_0_mc_resume(adev, &save);
 
 	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
@@ -391,15 +386,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+	amdgpu_gart_set_defaults(adev);
 	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
 
 	return 0;
@@ -611,8 +598,8 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -666,7 +653,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -1138,7 +1125,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
 static int gmc_v7_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct amdgpu_mode_mc_save save;
 	u32 srbm_soft_reset = 0;
 	u32 tmp = RREG32(mmSRBM_STATUS);
 
@@ -1154,7 +1140,7 @@ static int gmc_v7_0_soft_reset(void *handle)
 	}
 
 	if (srbm_soft_reset) {
-		gmc_v7_0_mc_stop(adev, &save);
+		gmc_v7_0_mc_stop(adev);
 		if (gmc_v7_0_wait_for_idle((void *)adev)) {
 			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
 		}
@@ -1175,7 +1161,7 @@ static int gmc_v7_0_soft_reset(void *handle)
 		/* Wait a little for things to settle down */
 		udelay(50);
 
-		gmc_v7_0_mc_resume(adev, &save);
+		gmc_v7_0_mc_resume(adev);
 		udelay(50);
 	}
 

+ 41 - 40
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@@ -35,6 +35,9 @@
 #include "oss/oss_3_0_d.h"
 #include "oss/oss_3_0_sh_mask.h"
 
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
 #include "vid.h"
 #include "vi.h"
 
@@ -161,14 +164,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 	}
 }
 
-static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
-			     struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
 {
 	u32 blackout;
 
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_stop_mc_access(adev, save);
-
 	gmc_v8_0_wait_for_idle(adev);
 
 	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -184,8 +183,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
 	udelay(100);
 }
 
-static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
-			       struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -197,9 +195,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
 	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
 	WREG32(mmBIF_FB_EN, tmp);
-
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_resume_mc_access(adev, save);
 }
 
 /**
@@ -404,15 +399,20 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
+	u64 base = 0;
+
+	if (!amdgpu_sriov_vf(adev))
+		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+	base <<= 24;
+
 	if (mc->mc_vram_size > 0xFFC0000000ULL) {
 		/* leave room for at least 1024M GTT */
 		dev_warn(adev->dev, "limiting VRAM\n");
 		mc->real_vram_size = 0xFFC0000000ULL;
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
-	amdgpu_vram_location(adev, &adev->mc, 0);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_vram_location(adev, &adev->mc, base);
+	amdgpu_gart_location(adev, mc);
 }
 
 /**
@@ -425,7 +425,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
  */
 static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 {
-	struct amdgpu_mode_mc_save save;
 	u32 tmp;
 	int i, j;
 
@@ -439,13 +438,20 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-	if (adev->mode_info.num_crtc)
-		amdgpu_display_set_vga_render_state(adev, false);
-
-	gmc_v8_0_mc_stop(adev, &save);
 	if (gmc_v8_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
+	if (adev->mode_info.num_crtc) {
+		/* Lockout access through VGA aperture*/
+		tmp = RREG32(mmVGA_HDP_CONTROL);
+		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+		WREG32(mmVGA_HDP_CONTROL, tmp);
+
+		/* disable VGA render */
+		tmp = RREG32(mmVGA_RENDER_CONTROL);
+		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+		WREG32(mmVGA_RENDER_CONTROL, tmp);
+	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 	       adev->mc.vram_start >> 12);
@@ -453,20 +459,23 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 	       adev->mc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
-	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
-	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
-	WREG32(mmMC_VM_FB_LOCATION, tmp);
-	/* XXX double check these! */
-	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
-	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
-	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+
+	if (amdgpu_sriov_vf(adev)) {
+		tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+		tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+		WREG32(mmMC_VM_FB_LOCATION, tmp);
+		/* XXX double check these! */
+		WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	}
+
 	WREG32(mmMC_VM_AGP_BASE, 0);
 	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
 	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 	if (gmc_v8_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
-	gmc_v8_0_mc_resume(adev, &save);
 
 	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
@@ -553,15 +562,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+	amdgpu_gart_set_defaults(adev);
 	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
 
 	return 0;
@@ -813,8 +814,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -869,7 +870,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -1260,7 +1261,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
 	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
-	gmc_v8_0_mc_stop(adev, &adev->mc.save);
+	gmc_v8_0_mc_stop(adev);
 	if (gmc_v8_0_wait_for_idle(adev)) {
 		dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
 	}
@@ -1306,7 +1307,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
-	gmc_v8_0_mc_resume(adev, &adev->mc.save);
+	gmc_v8_0_mc_resume(adev);
 	return 0;
 }
 

+ 80 - 59
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -23,11 +23,14 @@
 #include <linux/firmware.h>
 #include "amdgpu.h"
 #include "gmc_v9_0.h"
+#include "amdgpu_atomfirmware.h"
 
 #include "vega10/soc15ip.h"
 #include "vega10/HDP/hdp_4_0_offset.h"
 #include "vega10/HDP/hdp_4_0_sh_mask.h"
 #include "vega10/GC/gc_9_0_sh_mask.h"
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
 #include "vega10/vega10_enum.h"
 
 #include "soc15_common.h"
@@ -419,8 +422,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 	if (!amdgpu_sriov_vf(adev))
 		base = mmhub_v1_0_get_fb_location(adev);
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	amdgpu_gart_location(adev, mc);
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU)
 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -442,43 +444,46 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	u32 tmp;
 	int chansize, numchan;
 
-	/* hbm memory channel size */
-	chansize = 128;
-
-	tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
-	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
-	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
-	switch (tmp) {
-	case 0:
-	default:
-		numchan = 1;
-		break;
-	case 1:
-		numchan = 2;
-		break;
-	case 2:
-		numchan = 0;
-		break;
-	case 3:
-		numchan = 4;
-		break;
-	case 4:
-		numchan = 0;
-		break;
-	case 5:
-		numchan = 8;
-		break;
-	case 6:
-		numchan = 0;
-		break;
-	case 7:
-		numchan = 16;
-		break;
-	case 8:
-		numchan = 2;
-		break;
+	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+	if (!adev->mc.vram_width) {
+		/* hbm memory channel size */
+		chansize = 128;
+
+		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
+		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+		switch (tmp) {
+		case 0:
+		default:
+			numchan = 1;
+			break;
+		case 1:
+			numchan = 2;
+			break;
+		case 2:
+			numchan = 0;
+			break;
+		case 3:
+			numchan = 4;
+			break;
+		case 4:
+			numchan = 0;
+			break;
+		case 5:
+			numchan = 8;
+			break;
+		case 6:
+			numchan = 0;
+			break;
+		case 7:
+			numchan = 16;
+			break;
+		case 8:
+			numchan = 2;
+			break;
+		}
+		adev->mc.vram_width = numchan * chansize;
 	}
-	adev->mc.vram_width = numchan * chansize;
 
 	/* Could aper size report 0 ? */
 	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
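
VRAM width is now taken from the atomfirmware tables when available; the DF IntLvNumChan decode survives only as a fallback for vbioses that report 0. The fallback collapses to a table lookup (HBM channel size is 128 bits):

#include <stdint.h>

static unsigned vram_width_fallback(uint32_t intlv_num_chan)
{
	/* Same mapping as the switch above; out-of-range values behave
	 * like the "case 0/default" arm. */
	static const unsigned numchan[] = { 1, 2, 0, 4, 0, 8, 0, 16, 2 };
	unsigned n = intlv_num_chan < 9 ? numchan[intlv_num_chan] : 1;

	return n * 128;                     /* channels * HBM channel width */
}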
@@ -494,15 +499,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+	amdgpu_gart_set_defaults(adev);
 	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
 
 	return 0;
@@ -537,10 +534,20 @@ static int gmc_v9_0_sw_init(void *handle)
 
 	spin_lock_init(&adev->mc.invalidate_lock);
 
-	if (adev->flags & AMD_IS_APU) {
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-		amdgpu_vm_adjust_size(adev, 64);
-	} else {
+		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
+			adev->vm_manager.vm_size = 1U << 18;
+			adev->vm_manager.block_size = 9;
+			adev->vm_manager.num_level = 3;
+		} else {
+			/* vm_size is 64GB for legacy 2-level page support*/
+			amdgpu_vm_adjust_size(adev, 64);
+			adev->vm_manager.num_level = 1;
+		}
+		break;
+	case CHIP_VEGA10:
 		/* XXX Don't know how to get VRAM type yet. */
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
 		/*
@@ -550,11 +557,16 @@ static int gmc_v9_0_sw_init(void *handle)
 		 */
 		adev->vm_manager.vm_size = 1U << 18;
 		adev->vm_manager.block_size = 9;
-		DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
-				adev->vm_manager.vm_size,
-				adev->vm_manager.block_size);
+		adev->vm_manager.num_level = 3;
+		break;
+	default:
+		break;
 	}
 
+	DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+			adev->vm_manager.vm_size,
+			adev->vm_manager.block_size);
+
 	/* This interrupt is VMC page fault.*/
 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
 				&adev->mc.vm_fault);
@@ -619,11 +631,6 @@ static int gmc_v9_0_sw_init(void *handle)
 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 
-	/* TODO: fix num_level for APU when updating vm size and block size */
-	if (adev->flags & AMD_IS_APU)
-		adev->vm_manager.num_level = 1;
-	else
-		adev->vm_manager.num_level = 3;
 	amdgpu_vm_manager_init(adev);
 
 	return 0;
@@ -731,7 +738,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
+		 (unsigned)(adev->mc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -745,6 +752,20 @@ static int gmc_v9_0_hw_init(void *handle)
 	/* The sequence of these two function calls matters.*/
 	gmc_v9_0_init_golden_registers(adev);
 
+	if (adev->mode_info.num_crtc) {
+		u32 tmp;
+
+		/* Lockout access through VGA aperture*/
+		tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
+		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+		WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+
+		/* disable VGA render */
+		tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
+		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+		WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+	}
+
 	r = gmc_v9_0_gart_enable(adev);
 
 	return r;

+ 60 - 48
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c

@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	mmhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.gart_start >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.gart_start >> 44));
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.gart_end >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.gart_end >> 44));
 }
 
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -143,7 +143,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	/* Setup L2 cache */
 	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
 	/* XXX for emulation, Refer to closed source code.*/
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
 			    0);
@@ -158,6 +158,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
 
 	tmp = mmVM_L2_CNTL3_DEFAULT;
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 
 	tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -222,6 +224,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
 				PAGE_TABLE_BLOCK_SIZE,
 				adev->vm_manager.block_size - 9);
+		/* Send no-retry XNACK on fault to suppress VM fault storm. */
+		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
 		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
 		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
@@ -245,28 +250,28 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 }
 
 struct pctl_data {
-    uint32_t index;
-    uint32_t data;
+	uint32_t index;
+	uint32_t data;
 };
 
-const struct pctl_data pctl0_data[] = {
-    {0x0, 0x7a640},
-    {0x9, 0x2a64a},
-    {0xd, 0x2a680},
-    {0x11, 0x6a684},
-    {0x19, 0xea68e},
-    {0x29, 0xa69e},
-    {0x2b, 0x34a6c0},
-    {0x61, 0x83a707},
-    {0xe6, 0x8a7a4},
-    {0xf0, 0x1a7b8},
-    {0xf3, 0xfa7cc},
-    {0x104, 0x17a7dd},
-    {0x11d, 0xa7dc},
-    {0x11f, 0x12a7f5},
-    {0x133, 0xa808},
-    {0x135, 0x12a810},
-    {0x149, 0x7a82c}
+static const struct pctl_data pctl0_data[] = {
+	{0x0, 0x7a640},
+	{0x9, 0x2a64a},
+	{0xd, 0x2a680},
+	{0x11, 0x6a684},
+	{0x19, 0xea68e},
+	{0x29, 0xa69e},
+	{0x2b, 0x34a6c0},
+	{0x61, 0x83a707},
+	{0xe6, 0x8a7a4},
+	{0xf0, 0x1a7b8},
+	{0xf3, 0xfa7cc},
+	{0x104, 0x17a7dd},
+	{0x11d, 0xa7dc},
+	{0x11f, 0x12a7f5},
+	{0x133, 0xa808},
+	{0x135, 0x12a810},
+	{0x149, 0x7a82c}
 };
 #define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
 
@@ -274,32 +279,39 @@ const struct pctl_data pctl0_data[] = {
 #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE  0xa640
 #define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
 
-const struct pctl_data pctl1_data[] = {
-    {0x0, 0x39a000},
-    {0x3b, 0x44a040},
-    {0x81, 0x2a08d},
-    {0x85, 0x6ba094},
-    {0xf2, 0x18a100},
-    {0x10c, 0x4a132},
-    {0x112, 0xca141},
-    {0x120, 0x2fa158},
-    {0x151, 0x17a1d0},
-    {0x16a, 0x1a1e9},
-    {0x16d, 0x13a1ec},
-    {0x182, 0x7a201},
-    {0x18b, 0x3a20a},
-    {0x190, 0x7a580},
-    {0x199, 0xa590},
-    {0x19b, 0x4a594},
-    {0x1a1, 0x1a59c},
-    {0x1a4, 0x7a82c},
-    {0x1ad, 0xfa7cc},
-    {0x1be, 0x17a7dd},
-    {0x1d7, 0x12a810}
+static const struct pctl_data pctl1_data[] = {
+	{0x0, 0x39a000},
+	{0x3b, 0x44a040},
+	{0x81, 0x2a08d},
+	{0x85, 0x6ba094},
+	{0xf2, 0x18a100},
+	{0x10c, 0x4a132},
+	{0x112, 0xca141},
+	{0x120, 0x2fa158},
+	{0x151, 0x17a1d0},
+	{0x16a, 0x1a1e9},
+	{0x16d, 0x13a1ec},
+	{0x182, 0x7a201},
+	{0x18b, 0x3a20a},
+	{0x190, 0x7a580},
+	{0x199, 0xa590},
+	{0x19b, 0x4a594},
+	{0x1a1, 0x1a59c},
+	{0x1a4, 0x7a82c},
+	{0x1ad, 0xfa7cc},
+	{0x1be, 0x17a7dd},
+	{0x1d7, 0x12a810},
+	{0x1eb, 0x4000a7e1},
+	{0x1ec, 0x5000a7f5},
+	{0x1ed, 0x4000a7e2},
+	{0x1ee, 0x5000a7dc},
+	{0x1ef, 0x4000a7e3},
+	{0x1f0, 0x5000a7f6},
+	{0x1f1, 0x5000a7e4}
 };
 #define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
 
-#define PCTL1_RENG_EXEC_END_PTR 0x1ea
+#define PCTL1_RENG_EXEC_END_PTR 0x1f1
 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE  0xa000
 #define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
 #define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE  0xa580
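
The pctl tables also grow seven entries and PCTL1_RENG_EXEC_END_PTR moves from 0x1ea to 0x1f1 to match the new last index. Judging from the values, each data word appears to carry a register offset in its low 16 bits (0xa640 is exactly PCTL0_STCTRL_REG_SAVE_RANGE0_BASE) with a count or opcode above it -- an inference from the tables, not something the hunk states:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word = 0x7a640;                /* first pctl0 data word */

	printf("offset=0x%04x hi=0x%x\n",
	       word & 0xffff, word >> 16);     /* offset=0xa640 hi=0x7  */
	return 0;
}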

+ 26 - 20
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c

@@ -72,21 +72,6 @@ static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
 		      reg);
 }
 
-static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
-				      enum idh_request req)
-{
-	u32 reg;
-
-	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
-					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
-	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
-			    MSGBUF_DATA, req);
-	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
-		      reg);
-
-	xgpu_ai_mailbox_set_valid(adev, true);
-}
-
 static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
 				   enum idh_event event)
 {
@@ -154,13 +139,25 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 	return r;
 }
 
-
-static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
-					enum idh_request req)
-{
+static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
+	      enum idh_request req, u32 data1, u32 data2, u32 data3) {
+	u32 reg;
 	int r;
 
-	xgpu_ai_mailbox_trans_msg(adev, req);
+	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
+	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
+			    MSGBUF_DATA, req);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
+		      reg);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
+				data1);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
+				data2);
+	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
+				data3);
+
+	xgpu_ai_mailbox_set_valid(adev, true);
 
 	/* start to poll ack */
 	r = xgpu_ai_poll_ack(adev);
@@ -168,6 +165,14 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 		pr_err("Doesn't get ack from pf, continue\n");
 
 	xgpu_ai_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
+					enum idh_request req)
+{
+	int r;
+
+	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
 
 	/* start to check msg if request is idh_req_gpu_init_access */
 	if (req == IDH_REQ_GPU_INIT_ACCESS ||
@@ -342,4 +347,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.req_full_gpu	= xgpu_ai_request_full_gpu_access,
 	.rel_full_gpu	= xgpu_ai_release_full_gpu_access,
 	.reset_gpu = xgpu_ai_request_reset,
+	.trans_msg = xgpu_ai_mailbox_trans_msg,
 };
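
xgpu_ai_mailbox_trans_msg() moves below the poll helpers, gains three payload dwords (written to MSGBUF_TRN_DW1..DW3 alongside the request in DW0) and absorbs the valid/ack handshake, which lets it be exported through virt_ops as .trans_msg -- the hook the new IDH_LOG_VF_ERROR request rides on. A toy model of the resulting sequence:

#include <stdint.h>

static uint32_t msgbuf[4];            /* stands in for MSGBUF_TRN_DW0..DW3 */

static void set_valid(int v) { (void)v; /* mailbox valid bit */ }
static int  poll_ack(void)   { return 0; /* bounded wait in the driver */ }

static void trans_msg(uint32_t req, uint32_t d1, uint32_t d2, uint32_t d3)
{
	msgbuf[0] = req;                /* request id, e.g. IDH_LOG_VF_ERROR */
	msgbuf[1] = d1;                 /* payload dwords */
	msgbuf[2] = d2;
	msgbuf[3] = d3;
	set_valid(1);
	(void)poll_ack();               /* driver warns on timeout, continues */
	set_valid(0);
}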

+ 3 - 1
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h

@@ -31,7 +31,9 @@ enum idh_request {
 	IDH_REL_GPU_INIT_ACCESS,
 	IDH_REQ_GPU_FINI_ACCESS,
 	IDH_REL_GPU_FINI_ACCESS,
-	IDH_REQ_GPU_RESET_ACCESS
+	IDH_REQ_GPU_RESET_ACCESS,
+
+	IDH_LOG_VF_ERROR       = 200,
 };
 
 enum idh_event {

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c

@@ -613,4 +613,5 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
 	.req_full_gpu		= xgpu_vi_request_full_gpu_access,
 	.rel_full_gpu		= xgpu_vi_release_full_gpu_access,
 	.reset_gpu		= xgpu_vi_request_reset,
+	.trans_msg		= NULL, /* Does not need to trans VF errors to host. */
 };

+ 3 - 1
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h

@@ -32,7 +32,9 @@ enum idh_request {
 	IDH_REL_GPU_INIT_ACCESS,
 	IDH_REQ_GPU_FINI_ACCESS,
 	IDH_REL_GPU_FINI_ACCESS,
-	IDH_REQ_GPU_RESET_ACCESS
+	IDH_REQ_GPU_RESET_ACCESS,
+
+	IDH_LOG_VF_ERROR       = 200,
 };
 
 /* VI mailbox messages data */

+ 14 - 1
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c

@@ -32,6 +32,7 @@
 
 #define smnCPM_CONTROL                                                                                  0x11180460
 #define smnPCIE_CNTL2                                                                                   0x11180070
+#define smnPCIE_CONFIG_CNTL                                                                             0x11180044
 
 u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 {
@@ -67,7 +68,7 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
 
 void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
 {
-	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 }
 
 u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
@@ -256,3 +257,15 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
 			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 	}
 }
+
+void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+{
+	uint32_t def, data;
+
+	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
+	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
+	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
+
+	if (def != data)
+		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+}
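
nbio_v6_1_init_registers() raises the CI_SWUS max-read-request-size mode/priv fields through the indirect PCIE index/data interface, using the usual read-modify-write-only-if-changed pattern so an already-initialised register is left untouched. The idiom in isolation:

#include <stdint.h>

static uint32_t fake_reg;                  /* stands in for RREG32_PCIE */

static void update_if_changed(uint32_t set_mask)
{
	uint32_t def, data;

	def = data = fake_reg;
	data |= set_mask;
	if (def != data)                   /* skip the bus write when a no-op */
		fake_reg = data;
}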

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h

@@ -50,5 +50,6 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool
 void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
 void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
 void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
+void nbio_v6_1_init_registers(struct amdgpu_device *adev);
 
 #endif

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c

@@ -65,7 +65,7 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
 
 void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
 {
-	WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 }
 
 u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)

+ 90 - 6
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c

@@ -86,6 +86,52 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
 	return 0;
 }
 
+int psp_v10_0_init_microcode(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+	const char *chip_name;
+	char fw_name[30];
+	int err = 0;
+	const struct psp_firmware_header_v1_0 *hdr;
+
+	DRM_DEBUG("\n");
+
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		chip_name = "raven";
+		break;
+	default: BUG();
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+
+	err = amdgpu_ucode_validate(adev->psp.asd_fw);
+	if (err)
+		goto out;
+
+	hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+	adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
+	adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	adev->psp.asd_start_addr = (uint8_t *)hdr +
+				le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+
+	return 0;
+out:
+	if (err) {
+		dev_err(adev->dev,
+			"psp v10.0: Failed to load firmware \"%s\"\n",
+			fw_name);
+		release_firmware(adev->psp.asd_fw);
+		adev->psp.asd_fw = NULL;
+	}
+
+	return err;
+}
+
 int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
 {
 	int ret;
@@ -110,7 +156,6 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
 int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
 {
 	int ret = 0;
-	unsigned int psp_ring_reg = 0;
 	struct psp_ring *ring;
 	struct amdgpu_device *adev = psp->adev;
 
@@ -130,6 +175,16 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
 		return ret;
 	}
 
+	return 0;
+}
+
+int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+	int ret = 0;
+	unsigned int psp_ring_reg = 0;
+	struct psp_ring *ring = &psp->km_ring;
+	struct amdgpu_device *adev = psp->adev;
+
 	/* Write low address of the ring to C2PMSG_69 */
 	psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -143,13 +198,42 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
 	psp_ring_reg = ring_type;
 	psp_ring_reg = psp_ring_reg << 16;
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+	/* There might be handshake issue with hardware which needs delay */
+	mdelay(20);
+
 	/* Wait for response flag (bit 31) in C2PMSG_64 */
-	psp_ring_reg = 0;
-	while ((psp_ring_reg & 0x80000000) == 0) {
-		psp_ring_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64);
-	}
+	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+			   0x80000000, 0x8000FFFF, false);
 
-	return 0;
+	return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+	int ret = 0;
+	struct psp_ring *ring;
+	unsigned int psp_ring_reg = 0;
+	struct amdgpu_device *adev = psp->adev;
+
+	ring = &psp->km_ring;
+
+	/* Write the ring destroy command to C2PMSG_64 */
+	psp_ring_reg = 3 << 16;
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+	/* There might be handshake issue with hardware which needs delay */
+	mdelay(20);
+
+	/* Wait for response flag (bit 31) in C2PMSG_64 */
+	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+			   0x80000000, 0x80000000, false);
+
+	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+			      &ring->ring_mem_mc_addr,
+			      (void **)&ring->ring_mem);
+
+	return ret;
 }
 
 int psp_v10_0_cmd_submit(struct psp_context *psp,
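
Ring bring-up is split to mirror psp_v3_1: ring_init only allocates the ring, ring_create programs the address and issues the create command, and ring_destroy sends command 3 and frees the buffer. The unbounded "while ((reg & 0x80000000) == 0)" spin is replaced with psp_wait_for(); note the create path's mask 0x8000FFFF insists the low status halfword is 0 as well as bit 31 being set. A toy bounded poll with the same shape:

#include <stdint.h>

/* Succeeds when (reg & mask) == expected within max_tries polls; a timeout
 * returns an error instead of hanging the CPU like the old open loop. */
static int wait_for(volatile uint32_t *reg, uint32_t expected, uint32_t mask,
		    int max_tries)
{
	while (max_tries--) {
		if ((*reg & mask) == expected)
			return 0;
		/* udelay() between polls in the real psp_wait_for() */
	}
	return -1;
}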

+ 5 - 0
drivers/gpu/drm/amd/amdgpu/psp_v10_0.h

@@ -27,10 +27,15 @@
 
 #include "amdgpu_psp.h"
 
+extern int psp_v10_0_init_microcode(struct psp_context *psp);
 extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
 				 struct psp_gfx_cmd_resp *cmd);
 extern int psp_v10_0_ring_init(struct psp_context *psp,
 			      enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_create(struct psp_context *psp,
+				 enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_destroy(struct psp_context *psp,
+				  enum psp_ring_type ring_type);
 extern int psp_v10_0_cmd_submit(struct psp_context *psp,
 			       struct amdgpu_firmware_info *ucode,
 			       uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,

+ 4 - 6
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c

@@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 
 	/* there might be handshake issue with hardware which needs delay */
 	mdelay(20);
-#if 0
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
 			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
 			   0, true);
-#endif
 
 	return ret;
 }
@@ -341,10 +339,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
 			   0x80000000, 0x80000000, false);
 
-	if (ring->ring_mem)
-		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
-				      &ring->ring_mem_mc_addr,
-				      (void **)&ring->ring_mem);
+	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+			      &ring->ring_mem_mc_addr,
+			      (void **)&ring->ring_mem);
+
 	return ret;
 }
 

+ 39 - 3
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -551,17 +551,53 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
-	u32 f32_cntl;
+	u32 f32_cntl, phase_quantum = 0;
 	int i;
 
+	if (amdgpu_sdma_phase_quantum) {
+		unsigned value = amdgpu_sdma_phase_quantum;
+		unsigned unit = 0;
+
+		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+			value = (value + 1) >> 1;
+			unit++;
+		}
+		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+			WARN_ONCE(1,
+			"clamping sdma_phase_quantum to %uK clock cycles\n",
+				  value << unit);
+		}
+		phase_quantum =
+			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+	}
+
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
-		if (enable)
+		if (enable) {
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 					AUTO_CTXSW_ENABLE, 1);
-		else
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					ATC_L1_ENABLE, 1);
+			if (amdgpu_sdma_phase_quantum) {
+				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+				       phase_quantum);
+				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+				       phase_quantum);
+			}
+		} else {
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 					AUTO_CTXSW_ENABLE, 0);
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+					ATC_L1_ENABLE, 1);
+		}
+
 		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
 	}
 }
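
The new amdgpu_sdma_phase_quantum module parameter is encoded into the PHASE*_QUANTUM registers as a mantissa/exponent pair: halve the value (rounding up) until it fits the VALUE field, counting halvings in UNIT, and saturate both fields if even that overflows -- the same encoder appears in sdma_v4_0 below. A standalone version with stand-in field widths:

#include <stdint.h>
#include <stdio.h>

#define VALUE_MAX 0xFF     /* stand-ins for the SDMA0_PHASE0_QUANTUM masks */
#define UNIT_MAX  0x0F

static void encode_quantum(unsigned q, unsigned *value, unsigned *unit)
{
	*value = q;
	*unit = 0;
	while (*value > VALUE_MAX) {
		*value = (*value + 1) >> 1;    /* halve, rounding up */
		(*unit)++;
	}
	if (*unit > UNIT_MAX) {                /* clamp, as the WARN_ONCE notes */
		*value = VALUE_MAX;
		*unit = UNIT_MAX;
	}
}

int main(void)
{
	unsigned v, u;

	encode_quantum(1000, &v, &u);
	printf("1000 -> value=%u unit=%u (~%u)\n", v, u, v << u); /* 250, 2, 1000 */
	return 0;
}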

+ 33 - 1
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

@@ -493,13 +493,45 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
-	u32 f32_cntl;
+	u32 f32_cntl, phase_quantum = 0;
 	int i;
 
+	if (amdgpu_sdma_phase_quantum) {
+		unsigned value = amdgpu_sdma_phase_quantum;
+		unsigned unit = 0;
+
+		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+			value = (value + 1) >> 1;
+			unit++;
+		}
+		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+			WARN_ONCE(1,
+			"clamping sdma_phase_quantum to %uK clock cycles\n",
+				  value << unit);
+		}
+		phase_quantum =
+			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+	}
+
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+		if (enable && amdgpu_sdma_phase_quantum) {
+			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM),
+			       phase_quantum);
+			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM),
+			       phase_quantum);
+			WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM),
+			       phase_quantum);
+		}
 		WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
 	}
 

+ 28 - 0
drivers/gpu/drm/amd/amdgpu/si.c

@@ -1150,6 +1150,33 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
 	return r;
 }
 
+#define mmROM_INDEX 0x2A
+#define mmROM_DATA  0x2B
+
+static bool si_read_bios_from_rom(struct amdgpu_device *adev,
+				  u8 *bios, u32 length_bytes)
+{
+	u32 *dw_ptr;
+	u32 i, length_dw;
+
+	if (bios == NULL)
+		return false;
+	if (length_bytes == 0)
+		return false;
+	/* APU vbios image is part of sbios image */
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	dw_ptr = (u32 *)bios;
+	length_dw = ALIGN(length_bytes, 4) / 4;
+	/* set rom index to 0 */
+	WREG32(mmROM_INDEX, 0);
+	for (i = 0; i < length_dw; i++)
+		dw_ptr[i] = RREG32(mmROM_DATA);
+
+	return true;
+}
+
 //xxx: not implemented
 static int si_asic_reset(struct amdgpu_device *adev)
 {
@@ -1206,6 +1233,7 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
+	.read_bios_from_rom = &si_read_bios_from_rom,
 	.read_register = &si_read_register,
 	.reset = &si_asic_reset,
 	.set_vga_state = &si_vga_set_state,
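
si_read_bios_from_rom() backs the new .read_bios_from_rom hook: it rejects APUs (their vbios lives inside the system BIOS image), writes ROM_INDEX once, then streams dwords out of ROM_DATA -- the data port advances the index on each read, which is why the loop never touches the index again. A toy model:

#include <stdint.h>

static const uint32_t rom[] = { 0xaa55aa55, 0x12345678, 0xdeadbeef };
static uint32_t rom_index;

static void     wr_rom_index(uint32_t i) { rom_index = i; }
static uint32_t rd_rom_data(void)        { return rom[rom_index++]; }

static void read_bios(uint32_t *dst, unsigned dwords)
{
	wr_rom_index(0);                 /* index written once ...           */
	for (unsigned i = 0; i < dwords; i++)
		dst[i] = rd_rom_data();  /* ... data port advances each read */
}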

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/si_dpm.c

@@ -2055,6 +2055,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
 		case 0x682C:
 			si_pi->cac_weights = cac_weights_cape_verde_pro;
 			si_pi->dte_data = dte_data_sun_xt;
+			update_dte_from_pl2 = true;
 			break;
 		case 0x6825:
 		case 0x6827:

+ 54 - 12
drivers/gpu/drm/amd/amdgpu/soc15.c

@@ -25,7 +25,7 @@
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
-#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
@@ -62,8 +62,6 @@
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"
 
-MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
-
 #define mmFabricConfigAccessControl                                                                    0x0410
 #define mmFabricConfigAccessControl_BASE_IDX                                                           0
 #define mmFabricConfigAccessControl_DEFAULT                                      0x00000000
@@ -198,6 +196,50 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 }
 
+static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
+	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
+	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+	return r;
+}
+
+static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
+	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
+	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
+	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
+	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
+	return r;
+}
+
+static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
+	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
+	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
+	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
+}
+
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
 	if (adev->flags & AMD_IS_APU)
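
The GC_CAC and SE_CAC blocks get the same accessor pattern as the existing didt/uvd_ctx helpers: an index/data register pair serialised by its own spinlock so two contexts cannot interleave the index write and the data access. The shape, with a pthread mutex standing in for the kernel spinlock:

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t cac_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t cac_index, cac_regs[256];

static uint32_t cac_rreg(uint32_t reg)
{
	uint32_t r;

	pthread_mutex_lock(&cac_lock);
	cac_index = reg & 0xff;          /* WREG32(..._IND_INDEX, reg) */
	r = cac_regs[cac_index];         /* RREG32(..._IND_DATA)       */
	pthread_mutex_unlock(&cac_lock);
	return r;
}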
@@ -392,11 +434,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
 
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
-	amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	soc15_gpu_pci_config_reset(adev);
 
-	amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return 0;
 }
@@ -524,13 +566,6 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 		return nbio_v6_1_get_rev_id(adev);
 }
 
-
-int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
-	/* to be implemented in MC IP*/
-	return 0;
-}
-
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
 {
 	.read_disabled_bios = &soc15_read_disabled_bios,
@@ -557,6 +592,10 @@ static int soc15_common_early_init(void *handle)
 	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
 	adev->didt_rreg = &soc15_didt_rreg;
 	adev->didt_wreg = &soc15_didt_wreg;
+	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
+	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
+	adev->se_cac_rreg = &soc15_se_cac_rreg;
+	adev->se_cac_wreg = &soc15_se_cac_wreg;
 
 	adev->asic_funcs = &soc15_asic_funcs;
 
@@ -681,6 +720,9 @@ static int soc15_common_hw_init(void *handle)
 	soc15_pcie_gen3_enable(adev);
 	/* enable aspm */
 	soc15_program_aspm(adev);
+	/* setup nbio registers */
+	if (!(adev->flags & AMD_IS_APU))
+		nbio_v6_1_init_registers(adev);
 	/* enable the doorbell aperture */
 	soc15_enable_doorbell_aperture(adev, true);
 

+ 7 - 0
drivers/gpu/drm/amd/amdgpu/soc15_common.h

@@ -77,6 +77,13 @@ struct nbio_pcie_index_data {
 		(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
 		(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
 
+#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
+	WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+		(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+		(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+		(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+		(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+
 #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
 	WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
 		(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \

+ 1 - 1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

@@ -664,7 +664,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 		/* This situation may be hit in the future if a new HW
 		 * generation exposes more than 64 queues. If so, the
 		 * definition of res.queue_mask needs updating */
-		if (WARN_ON(i > (sizeof(res.queue_mask)*8))) {
+		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
 			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
 			break;
 		}

+ 63 - 0
drivers/gpu/drm/amd/include/atomfirmware.h

@@ -1233,6 +1233,69 @@ struct  atom_asic_profiling_info_v4_1
   uint32_t  phyclk2gfxclk_c;
 };
 
+struct  atom_asic_profiling_info_v4_2 {
+	struct  atom_common_table_header  table_header;
+	uint32_t  maxvddc;
+	uint32_t  minvddc;
+	uint32_t  avfs_meannsigma_acontant0;
+	uint32_t  avfs_meannsigma_acontant1;
+	uint32_t  avfs_meannsigma_acontant2;
+	uint16_t  avfs_meannsigma_dc_tol_sigma;
+	uint16_t  avfs_meannsigma_platform_mean;
+	uint16_t  avfs_meannsigma_platform_sigma;
+	uint32_t  gb_vdroop_table_cksoff_a0;
+	uint32_t  gb_vdroop_table_cksoff_a1;
+	uint32_t  gb_vdroop_table_cksoff_a2;
+	uint32_t  gb_vdroop_table_ckson_a0;
+	uint32_t  gb_vdroop_table_ckson_a1;
+	uint32_t  gb_vdroop_table_ckson_a2;
+	uint32_t  avfsgb_fuse_table_cksoff_m1;
+	uint32_t  avfsgb_fuse_table_cksoff_m2;
+	uint32_t  avfsgb_fuse_table_cksoff_b;
+	uint32_t  avfsgb_fuse_table_ckson_m1;
+	uint32_t  avfsgb_fuse_table_ckson_m2;
+	uint32_t  avfsgb_fuse_table_ckson_b;
+	uint16_t  max_voltage_0_25mv;
+	uint8_t   enable_gb_vdroop_table_cksoff;
+	uint8_t   enable_gb_vdroop_table_ckson;
+	uint8_t   enable_gb_fuse_table_cksoff;
+	uint8_t   enable_gb_fuse_table_ckson;
+	uint16_t  psm_age_comfactor;
+	uint8_t   enable_apply_avfs_cksoff_voltage;
+	uint8_t   reserved;
+	uint32_t  dispclk2gfxclk_a;
+	uint32_t  dispclk2gfxclk_b;
+	uint32_t  dispclk2gfxclk_c;
+	uint32_t  pixclk2gfxclk_a;
+	uint32_t  pixclk2gfxclk_b;
+	uint32_t  pixclk2gfxclk_c;
+	uint32_t  dcefclk2gfxclk_a;
+	uint32_t  dcefclk2gfxclk_b;
+	uint32_t  dcefclk2gfxclk_c;
+	uint32_t  phyclk2gfxclk_a;
+	uint32_t  phyclk2gfxclk_b;
+	uint32_t  phyclk2gfxclk_c;
+	uint32_t  acg_gb_vdroop_table_a0;
+	uint32_t  acg_gb_vdroop_table_a1;
+	uint32_t  acg_gb_vdroop_table_a2;
+	uint32_t  acg_avfsgb_fuse_table_m1;
+	uint32_t  acg_avfsgb_fuse_table_m2;
+	uint32_t  acg_avfsgb_fuse_table_b;
+	uint8_t   enable_acg_gb_vdroop_table;
+	uint8_t   enable_acg_gb_fuse_table;
+	uint32_t  acg_dispclk2gfxclk_a;
+	uint32_t  acg_dispclk2gfxclk_b;
+	uint32_t  acg_dispclk2gfxclk_c;
+	uint32_t  acg_pixclk2gfxclk_a;
+	uint32_t  acg_pixclk2gfxclk_b;
+	uint32_t  acg_pixclk2gfxclk_c;
+	uint32_t  acg_dcefclk2gfxclk_a;
+	uint32_t  acg_dcefclk2gfxclk_b;
+	uint32_t  acg_dcefclk2gfxclk_c;
+	uint32_t  acg_phyclk2gfxclk_a;
+	uint32_t  acg_phyclk2gfxclk_b;
+	uint32_t  acg_phyclk2gfxclk_c;
+};
 
 /* 
   ***************************************************************************

+ 6 - 0
drivers/gpu/drm/amd/include/cgs_common.h

@@ -50,6 +50,7 @@ enum cgs_ind_reg {
 	CGS_IND_REG__UVD_CTX,
 	CGS_IND_REG__DIDT,
 	CGS_IND_REG_GC_CAC,
+	CGS_IND_REG_SE_CAC,
 	CGS_IND_REG__AUDIO_ENDPT
 };
 
@@ -406,6 +407,8 @@ typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
 
 typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
 
+typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
+
 struct cgs_ops {
 	/* memory management calls (similar to KFD interface) */
 	cgs_alloc_gpu_mem_t alloc_gpu_mem;
@@ -441,6 +444,7 @@ struct cgs_ops {
 	cgs_query_system_info query_system_info;
 	cgs_is_virtualization_enabled_t is_virtualization_enabled;
 	cgs_enter_safe_mode enter_safe_mode;
+	cgs_lock_grbm_idx lock_grbm_idx;
 };
 
 struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -517,4 +521,6 @@ struct cgs_device
 #define cgs_enter_safe_mode(cgs_device, en) \
 		CGS_CALL(enter_safe_mode, cgs_device, en)
 
+#define cgs_lock_grbm_idx(cgs_device, lock) \
+		CGS_CALL(lock_grbm_idx, cgs_device, lock)
 #endif /* _CGS_COMMON_H */

+ 169 - 70
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c

@@ -276,7 +276,10 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
 		struct pp_atomfwctrl_avfs_parameters *param)
 {
 	uint16_t idx;
+	uint8_t format_revision, content_revision;
+
 	struct atom_asic_profiling_info_v4_1 *profile;
+	struct atom_asic_profiling_info_v4_2 *profile_v4_2;
 
 	idx = GetIndexIntoMasterDataTable(asic_profiling_info);
 	profile = (struct atom_asic_profiling_info_v4_1 *)
@@ -286,76 +289,172 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
 	if (!profile)
 		return -1;
 
-	param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
-	param->ulMinVddc = le32_to_cpu(profile->minvddc);
-	param->ulMeanNsigmaAcontant0 =
-			le32_to_cpu(profile->avfs_meannsigma_acontant0);
-	param->ulMeanNsigmaAcontant1 =
-			le32_to_cpu(profile->avfs_meannsigma_acontant1);
-	param->ulMeanNsigmaAcontant2 =
-			le32_to_cpu(profile->avfs_meannsigma_acontant2);
-	param->usMeanNsigmaDcTolSigma =
-			le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
-	param->usMeanNsigmaPlatformMean =
-			le16_to_cpu(profile->avfs_meannsigma_platform_mean);
-	param->usMeanNsigmaPlatformSigma =
-			le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
-	param->ulGbVdroopTableCksoffA0 =
-			le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
-	param->ulGbVdroopTableCksoffA1 =
-			le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
-	param->ulGbVdroopTableCksoffA2 =
-			le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
-	param->ulGbVdroopTableCksonA0 =
-			le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
-	param->ulGbVdroopTableCksonA1 =
-			le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
-	param->ulGbVdroopTableCksonA2 =
-			le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
-	param->ulGbFuseTableCksoffM1 =
-			le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
-	param->ulGbFuseTableCksoffM2 =
-			le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
-	param->ulGbFuseTableCksoffB =
-			le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
-	param->ulGbFuseTableCksonM1 =
-			le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
-	param->ulGbFuseTableCksonM2 =
-			le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
-	param->ulGbFuseTableCksonB =
-			le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
-
-	param->ucEnableGbVdroopTableCkson =
-			profile->enable_gb_vdroop_table_ckson;
-	param->ucEnableGbFuseTableCkson =
-			profile->enable_gb_fuse_table_ckson;
-	param->usPsmAgeComfactor =
-			le16_to_cpu(profile->psm_age_comfactor);
-
-	param->ulDispclk2GfxclkM1 =
-			le32_to_cpu(profile->dispclk2gfxclk_a);
-	param->ulDispclk2GfxclkM2 =
-			le32_to_cpu(profile->dispclk2gfxclk_b);
-	param->ulDispclk2GfxclkB =
-			le32_to_cpu(profile->dispclk2gfxclk_c);
-	param->ulDcefclk2GfxclkM1 =
-			le32_to_cpu(profile->dcefclk2gfxclk_a);
-	param->ulDcefclk2GfxclkM2 =
-			le32_to_cpu(profile->dcefclk2gfxclk_b);
-	param->ulDcefclk2GfxclkB =
-			le32_to_cpu(profile->dcefclk2gfxclk_c);
-	param->ulPixelclk2GfxclkM1 =
-			le32_to_cpu(profile->pixclk2gfxclk_a);
-	param->ulPixelclk2GfxclkM2 =
-			le32_to_cpu(profile->pixclk2gfxclk_b);
-	param->ulPixelclk2GfxclkB =
-			le32_to_cpu(profile->pixclk2gfxclk_c);
-	param->ulPhyclk2GfxclkM1 =
-			le32_to_cpu(profile->phyclk2gfxclk_a);
-	param->ulPhyclk2GfxclkM2 =
-			le32_to_cpu(profile->phyclk2gfxclk_b);
-	param->ulPhyclk2GfxclkB =
-			le32_to_cpu(profile->phyclk2gfxclk_c);
+	format_revision = ((struct atom_common_table_header *)profile)->format_revision;
+	content_revision = ((struct atom_common_table_header *)profile)->content_revision;
+
+	if (format_revision == 4 && content_revision == 1) {
+		param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
+		param->ulMinVddc = le32_to_cpu(profile->minvddc);
+		param->ulMeanNsigmaAcontant0 =
+				le32_to_cpu(profile->avfs_meannsigma_acontant0);
+		param->ulMeanNsigmaAcontant1 =
+				le32_to_cpu(profile->avfs_meannsigma_acontant1);
+		param->ulMeanNsigmaAcontant2 =
+				le32_to_cpu(profile->avfs_meannsigma_acontant2);
+		param->usMeanNsigmaDcTolSigma =
+				le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
+		param->usMeanNsigmaPlatformMean =
+				le16_to_cpu(profile->avfs_meannsigma_platform_mean);
+		param->usMeanNsigmaPlatformSigma =
+				le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
+		param->ulGbVdroopTableCksoffA0 =
+				le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
+		param->ulGbVdroopTableCksoffA1 =
+				le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
+		param->ulGbVdroopTableCksoffA2 =
+				le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
+		param->ulGbVdroopTableCksonA0 =
+				le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
+		param->ulGbVdroopTableCksonA1 =
+				le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
+		param->ulGbVdroopTableCksonA2 =
+				le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
+		param->ulGbFuseTableCksoffM1 =
+				le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
+		param->ulGbFuseTableCksoffM2 =
+				le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
+		param->ulGbFuseTableCksoffB =
+				le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
+		param->ulGbFuseTableCksonM1 =
+				le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
+		param->ulGbFuseTableCksonM2 =
+				le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
+		param->ulGbFuseTableCksonB =
+				le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
+
+		param->ucEnableGbVdroopTableCkson =
+				profile->enable_gb_vdroop_table_ckson;
+		param->ucEnableGbFuseTableCkson =
+				profile->enable_gb_fuse_table_ckson;
+		param->usPsmAgeComfactor =
+				le16_to_cpu(profile->psm_age_comfactor);
+
+		param->ulDispclk2GfxclkM1 =
+				le32_to_cpu(profile->dispclk2gfxclk_a);
+		param->ulDispclk2GfxclkM2 =
+				le32_to_cpu(profile->dispclk2gfxclk_b);
+		param->ulDispclk2GfxclkB =
+				le32_to_cpu(profile->dispclk2gfxclk_c);
+		param->ulDcefclk2GfxclkM1 =
+				le32_to_cpu(profile->dcefclk2gfxclk_a);
+		param->ulDcefclk2GfxclkM2 =
+				le32_to_cpu(profile->dcefclk2gfxclk_b);
+		param->ulDcefclk2GfxclkB =
+				le32_to_cpu(profile->dcefclk2gfxclk_c);
+		param->ulPixelclk2GfxclkM1 =
+				le32_to_cpu(profile->pixclk2gfxclk_a);
+		param->ulPixelclk2GfxclkM2 =
+				le32_to_cpu(profile->pixclk2gfxclk_b);
+		param->ulPixelclk2GfxclkB =
+				le32_to_cpu(profile->pixclk2gfxclk_c);
+		param->ulPhyclk2GfxclkM1 =
+				le32_to_cpu(profile->phyclk2gfxclk_a);
+		param->ulPhyclk2GfxclkM2 =
+				le32_to_cpu(profile->phyclk2gfxclk_b);
+		param->ulPhyclk2GfxclkB =
+				le32_to_cpu(profile->phyclk2gfxclk_c);
+		param->ulAcgGbVdroopTableA0           = 0;
+		param->ulAcgGbVdroopTableA1           = 0;
+		param->ulAcgGbVdroopTableA2           = 0;
+		param->ulAcgGbFuseTableM1             = 0;
+		param->ulAcgGbFuseTableM2             = 0;
+		param->ulAcgGbFuseTableB              = 0;
+		param->ucAcgEnableGbVdroopTable       = 0;
+		param->ucAcgEnableGbFuseTable         = 0;
+	} else if (format_revision == 4 && content_revision == 2) {
+		profile_v4_2 = (struct atom_asic_profiling_info_v4_2 *)profile;
+		param->ulMaxVddc = le32_to_cpu(profile_v4_2->maxvddc);
+		param->ulMinVddc = le32_to_cpu(profile_v4_2->minvddc);
+		param->ulMeanNsigmaAcontant0 =
+				le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant0);
+		param->ulMeanNsigmaAcontant1 =
+				le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant1);
+		param->ulMeanNsigmaAcontant2 =
+				le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant2);
+		param->usMeanNsigmaDcTolSigma =
+				le16_to_cpu(profile_v4_2->avfs_meannsigma_dc_tol_sigma);
+		param->usMeanNsigmaPlatformMean =
+				le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_mean);
+		param->usMeanNsigmaPlatformSigma =
+				le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_sigma);
+		param->ulGbVdroopTableCksoffA0 =
+				le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a0);
+		param->ulGbVdroopTableCksoffA1 =
+				le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a1);
+		param->ulGbVdroopTableCksoffA2 =
+				le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a2);
+		param->ulGbVdroopTableCksonA0 =
+				le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a0);
+		param->ulGbVdroopTableCksonA1 =
+				le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a1);
+		param->ulGbVdroopTableCksonA2 =
+				le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a2);
+		param->ulGbFuseTableCksoffM1 =
+				le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m1);
+		param->ulGbFuseTableCksoffM2 =
+				le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m2);
+		param->ulGbFuseTableCksoffB =
+				le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_b);
+		param->ulGbFuseTableCksonM1 =
+				le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m1);
+		param->ulGbFuseTableCksonM2 =
+				le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m2);
+		param->ulGbFuseTableCksonB =
+				le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_b);
+
+		param->ucEnableGbVdroopTableCkson =
+				profile_v4_2->enable_gb_vdroop_table_ckson;
+		param->ucEnableGbFuseTableCkson =
+				profile_v4_2->enable_gb_fuse_table_ckson;
+		param->usPsmAgeComfactor =
+				le16_to_cpu(profile_v4_2->psm_age_comfactor);
+
+		param->ulDispclk2GfxclkM1 =
+				le32_to_cpu(profile_v4_2->dispclk2gfxclk_a);
+		param->ulDispclk2GfxclkM2 =
+				le32_to_cpu(profile_v4_2->dispclk2gfxclk_b);
+		param->ulDispclk2GfxclkB =
+				le32_to_cpu(profile_v4_2->dispclk2gfxclk_c);
+		param->ulDcefclk2GfxclkM1 =
+				le32_to_cpu(profile_v4_2->dcefclk2gfxclk_a);
+		param->ulDcefclk2GfxclkM2 =
+				le32_to_cpu(profile_v4_2->dcefclk2gfxclk_b);
+		param->ulDcefclk2GfxclkB =
+				le32_to_cpu(profile_v4_2->dcefclk2gfxclk_c);
+		param->ulPixelclk2GfxclkM1 =
+				le32_to_cpu(profile_v4_2->pixclk2gfxclk_a);
+		param->ulPixelclk2GfxclkM2 =
+				le32_to_cpu(profile_v4_2->pixclk2gfxclk_b);
+		param->ulPixelclk2GfxclkB =
+				le32_to_cpu(profile_v4_2->pixclk2gfxclk_c);
+		param->ulPhyclk2GfxclkM1 =
+				le32_to_cpu(profile_v4_2->phyclk2gfxclk_a);
+		param->ulPhyclk2GfxclkM2 =
+				le32_to_cpu(profile_v4_2->phyclk2gfxclk_b);
+		param->ulPhyclk2GfxclkB =
+				le32_to_cpu(profile_v4_2->phyclk2gfxclk_c);
+		param->ulAcgGbVdroopTableA0 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a0);
+		param->ulAcgGbVdroopTableA1 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a1);
+		param->ulAcgGbVdroopTableA2 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a2);
+		param->ulAcgGbFuseTableM1 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m1);
+		param->ulAcgGbFuseTableM2 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m2);
+		param->ulAcgGbFuseTableB = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_b);
+		param->ucAcgEnableGbVdroopTable = profile_v4_2->enable_acg_gb_vdroop_table;
+		param->ucAcgEnableGbFuseTable = profile_v4_2->enable_acg_gb_fuse_table;
+	} else {
+		pr_info("Invalid VBIOS AVFS ProfilingInfo Revision!\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }

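pp_atomfwctrl_get_avfs_information() now keys its parse off the table's common header instead of assuming a v4.1 layout. A minimal sketch of that dispatch pattern; parse_v4_1()/parse_v4_2() are hypothetical stand-ins for the field copies above, while the header layout matches atom_common_table_header in atomfirmware.h:

	#include <errno.h>
	#include <stdint.h>

	/* Every ATOM firmware table starts with this common header, so one
	 * pointer can be probed first and then re-cast to whichever
	 * versioned layout the revision pair names. */
	struct atom_common_table_header {
		uint16_t structuresize;
		uint8_t  format_revision;
		uint8_t  content_revision;
	};

	int parse_v4_1(void *table);	/* hypothetical per-revision helpers */
	int parse_v4_2(void *table);

	static int parse_profiling_info(void *table)
	{
		struct atom_common_table_header *hdr = table;

		if (hdr->format_revision == 4 && hdr->content_revision == 1)
			return parse_v4_1(table);
		if (hdr->format_revision == 4 && hdr->content_revision == 2)
			return parse_v4_2(table);
		return -EINVAL;		/* unknown revision: refuse to guess */
	}

Note that the v4.1 branch above also zeroes the ACG fields it has no source for, so the pp_atomfwctrl_avfs_parameters struct reaches the SMC table code fully initialized either way.
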
+ 8 - 0
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h

@@ -109,6 +109,14 @@ struct pp_atomfwctrl_avfs_parameters {
 	uint32_t   ulPhyclk2GfxclkM1;
 	uint32_t   ulPhyclk2GfxclkM2;
 	uint32_t   ulPhyclk2GfxclkB;
+	uint32_t   ulAcgGbVdroopTableA0;
+	uint32_t   ulAcgGbVdroopTableA1;
+	uint32_t   ulAcgGbVdroopTableA2;
+	uint32_t   ulAcgGbFuseTableM1;
+	uint32_t   ulAcgGbFuseTableM2;
+	uint32_t   ulAcgGbFuseTableB;
+	uint32_t   ucAcgEnableGbVdroopTable;
+	uint32_t   ucAcgEnableGbFuseTable;
 };
 
 struct pp_atomfwctrl_gpio_parameters {

+ 9 - 0
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c

@@ -4630,6 +4630,15 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
 
 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
 {
+	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+	if (smu_data == NULL)
+		return -EINVAL;
+
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+		return 0;
+
 	if (enable) {
 		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
 				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))

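The two early returns added to smu7_avfs_control() encode different conditions; a restatement of the same lines with the (assumed) rationale spelled out as comments:

	/* A NULL backend is a driver bug, so it fails hard with -EINVAL;
	 * AVFS_BTC_NOTSUPPORTED just means the fuses/firmware never offered
	 * AVFS, so enable/disable degrades to a successful no-op and callers
	 * in the DPM setup/teardown paths need no support check of their own. */
	if (smu_data == NULL)
		return -EINVAL;
	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
		return 0;
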
+ 239 - 37
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c

@@ -78,6 +78,8 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
+static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
+		enum pp_clock_type type, uint32_t mask);
 
 const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
 
@@ -146,6 +148,19 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.vr1hot_enabled = 1;
 	data->registry_data.regulator_hot_gpio_support = 1;
 
+	data->registry_data.didt_support = 1;
+	if (data->registry_data.didt_support) {
+		data->registry_data.didt_mode = 6;
+		data->registry_data.sq_ramping_support = 1;
+		data->registry_data.db_ramping_support = 0;
+		data->registry_data.td_ramping_support = 0;
+		data->registry_data.tcp_ramping_support = 0;
+		data->registry_data.dbr_ramping_support = 0;
+		data->registry_data.edc_didt_support = 1;
+		data->registry_data.gc_didt_support = 0;
+		data->registry_data.psm_didt_support = 0;
+	}
+
 	data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
 	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
 	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
@@ -222,6 +237,8 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 	/* assume disabled */
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_PowerContainment);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DiDtSupport);
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SQRamping);
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -230,6 +247,34 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 			PHM_PlatformCaps_TDRamping);
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_TCPRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DBRRamping);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_DiDtEDCEnable);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_GCEDC);
+	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_PSM);
+
+	if (data->registry_data.didt_support) {
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
+		if (data->registry_data.sq_ramping_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
+		if (data->registry_data.db_ramping_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
+		if (data->registry_data.td_ramping_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
+		if (data->registry_data.tcp_ramping_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
+		if (data->registry_data.dbr_ramping_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
+		if (data->registry_data.edc_didt_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
+		if (data->registry_data.gc_didt_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
+		if (data->registry_data.psm_didt_support)
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
+	}
 
 	if (data->registry_data.power_containment_support)
 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -321,8 +366,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 			FEATURE_LED_DISPLAY_BIT;
 	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
 			FEATURE_FAN_CONTROL_BIT;
-	data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
-			FEATURE_VOLTAGE_CONTROLLER_BIT;
+	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
+	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
 
 	if (!data->registry_data.prefetcher_dpm_key_disabled)
 		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
@@ -386,6 +431,15 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	if (data->registry_data.vr0hot_enabled)
 		data->smu_features[GNLD_VR0HOT].supported = true;
 
+	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
+	vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
+	/* ACG firmware has major version 5 */
+	if ((data->smu_version & 0xff000000) == 0x5000000)
+		data->smu_features[GNLD_ACG].supported = true;
+
+	if (data->registry_data.didt_support)
+		data->smu_features[GNLD_DIDT].supported = true;
+
 }
 
 #ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -2128,15 +2182,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 			pp_table->AvfsGbCksOff.m2_shift = 12;
 			pp_table->AvfsGbCksOff.b_shift = 0;
 
-			for (i = 0; i < dep_table->count; i++) {
-				if (dep_table->entries[i].sclk_offset == 0)
-					pp_table->StaticVoltageOffsetVid[i] = 248;
-				else
-					pp_table->StaticVoltageOffsetVid[i] =
-						(uint8_t)(dep_table->entries[i].sclk_offset *
-								VOLTAGE_VID_OFFSET_SCALE2 /
-								VOLTAGE_VID_OFFSET_SCALE1);
-			}
+			for (i = 0; i < dep_table->count; i++)
+				pp_table->StaticVoltageOffsetVid[i] =
+						convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
 
 			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
 					data->disp_clk_quad_eqn_a) &&
@@ -2228,6 +2276,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
 			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
+
+			pp_table->AcgBtcGbVdroopTable.a0       = avfs_params.ulAcgGbVdroopTableA0;
+			pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
+			pp_table->AcgBtcGbVdroopTable.a1       = avfs_params.ulAcgGbVdroopTableA1;
+			pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
+			pp_table->AcgBtcGbVdroopTable.a2       = avfs_params.ulAcgGbVdroopTableA2;
+			pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
+
+			pp_table->AcgAvfsGb.m1                   = avfs_params.ulAcgGbFuseTableM1;
+			pp_table->AcgAvfsGb.m2                   = avfs_params.ulAcgGbFuseTableM2;
+			pp_table->AcgAvfsGb.b                    = avfs_params.ulAcgGbFuseTableB;
+			pp_table->AcgAvfsGb.m1_shift             = 0;
+			pp_table->AcgAvfsGb.m2_shift             = 0;
+			pp_table->AcgAvfsGb.b_shift              = 0;
+
 		} else {
 			data->smu_features[GNLD_AVFS].supported = false;
 		}
@@ -2236,6 +2299,55 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	uint32_t acg_btc_response;
+
+	if (data->smu_features[GNLD_ACG].supported) {
+		if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
+			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
+
+		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
+	vega10_read_arg_from_smc(hwmgr->smumgr, &acg_btc_response);
+
+		if (1 == acg_btc_response) {
+			if (1 == data->acg_loop_state)
+				smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
+			else if (2 == data->acg_loop_state)
+				smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
+			if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+						data->smu_features[GNLD_ACG].smu_feature_bitmap))
+				data->smu_features[GNLD_ACG].enabled = true;
+		} else {
+			pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
+			data->smu_features[GNLD_ACG].enabled = false;
+		}
+	}
+
+	return 0;
+}
+
+static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+
+	if (data->smu_features[GNLD_ACG].supported) {
+		if (data->smu_features[GNLD_ACG].enabled) {
+			if (0 == vega10_enable_smc_features(hwmgr->smumgr, false,
+					data->smu_features[GNLD_ACG].smu_feature_bitmap))
+				data->smu_features[GNLD_ACG].enabled = false;
+		}
+	}
+
+	return 0;
+}
+
 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data =
@@ -2506,7 +2618,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 	result = vega10_avfs_enable(hwmgr, true);
 	PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
 					return result);
-
+	vega10_acg_enable(hwmgr);
 	vega10_save_default_power_profile(hwmgr);
 
 	return 0;
@@ -2838,6 +2950,11 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE(!tmp_result,
 			"Failed to start DPM!", result = tmp_result);
 
+	/* enable didt; do not abort if it fails */
+	tmp_result = vega10_enable_didt_config(hwmgr);
+	PP_ASSERT(!tmp_result,
+			"Failed to enable didt config!");
+
 	tmp_result = vega10_enable_power_containment(hwmgr);
 	PP_ASSERT_WITH_CODE(!tmp_result,
 			"Failed to enable power containment!",
@@ -4103,34 +4220,30 @@ static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
-static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
-				enum amd_dpm_forced_level level)
+static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
 {
-	int ret = 0;
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
 
-	switch (level) {
-	case AMD_DPM_FORCED_LEVEL_HIGH:
-		ret = vega10_force_dpm_highest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_LOW:
-		ret = vega10_force_dpm_lowest(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	case AMD_DPM_FORCED_LEVEL_AUTO:
-		ret = vega10_unforce_dpm_levels(hwmgr);
-		if (ret)
-			return ret;
-		break;
-	default:
-		break;
+	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
+		table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
+		table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
+		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
+		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
+		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
 	}
 
-	hwmgr->dpm_level = level;
-
-	return ret;
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+		*sclk_mask = 0;
+	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+		*mclk_mask = 0;
+	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+		*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
+		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+	}
+	return 0;
 }
 
 static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
@@ -4157,6 +4270,86 @@ static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 	return result;
 }
 
+static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+				enum amd_dpm_forced_level level)
+{
+	int ret = 0;
+	uint32_t sclk_mask = 0;
+	uint32_t mclk_mask = 0;
+	uint32_t soc_mask = 0;
+	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+	if (level == hwmgr->dpm_level)
+		return ret;
+
+	if (!(hwmgr->dpm_level & profile_mode_mask)) {
+		/* enter profile mode, save current level, disable gfx cg*/
+		if (level & profile_mode_mask) {
+			hwmgr->saved_dpm_level = hwmgr->dpm_level;
+			cgs_set_clockgating_state(hwmgr->device,
+						AMD_IP_BLOCK_TYPE_GFX,
+						AMD_CG_STATE_UNGATE);
+		}
+	} else {
+		/* exit profile mode, restore level, enable gfx cg*/
+		if (!(level & profile_mode_mask)) {
+			if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+				level = hwmgr->saved_dpm_level;
+			cgs_set_clockgating_state(hwmgr->device,
+					AMD_IP_BLOCK_TYPE_GFX,
+					AMD_CG_STATE_GATE);
+		}
+	}
+
+	switch (level) {
+	case AMD_DPM_FORCED_LEVEL_HIGH:
+		ret = vega10_force_dpm_highest(hwmgr);
+		if (ret)
+			return ret;
+		hwmgr->dpm_level = level;
+		break;
+	case AMD_DPM_FORCED_LEVEL_LOW:
+		ret = vega10_force_dpm_lowest(hwmgr);
+		if (ret)
+			return ret;
+		hwmgr->dpm_level = level;
+		break;
+	case AMD_DPM_FORCED_LEVEL_AUTO:
+		ret = vega10_unforce_dpm_levels(hwmgr);
+		if (ret)
+			return ret;
+		hwmgr->dpm_level = level;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+		if (ret)
+			return ret;
+		hwmgr->dpm_level = level;
+		vega10_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+		vega10_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
+		break;
+	case AMD_DPM_FORCED_LEVEL_MANUAL:
+		hwmgr->dpm_level = level;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+	default:
+		break;
+	}
+
+	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+		vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+	else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+		vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+
+	return 0;
+}
+
 static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
 {
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4402,7 +4595,9 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
 	int i;
 
-	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+	if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+				AMD_DPM_FORCED_LEVEL_LOW |
+				AMD_DPM_FORCED_LEVEL_HIGH))
 		return -EINVAL;
 
 	switch (type) {
@@ -4667,6 +4862,10 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to disable power containment!", result = tmp_result);
 
+	tmp_result = vega10_disable_didt_config(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable didt config!", result = tmp_result);
+
 	tmp_result = vega10_avfs_enable(hwmgr, false);
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to disable AVFS!", result = tmp_result);
@@ -4683,6 +4882,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to disable ulv!", result = tmp_result);
 
+	tmp_result = vega10_acg_disable(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to disable acg!", result = tmp_result);
 	return result;
 }
 

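The reworked vega10_dpm_force_dpm_level() leans on the AMD_DPM_FORCED_LEVEL_* values being distinct bits, so profile-mode entry and exit fall out of two mask tests. A standalone sketch of that transition logic; the bit values here are illustrative stand-ins for the real definitions in amd_shared.h:

	/* Illustrative bit values; what matters for the logic below is only
	 * that each forced level is a distinct bit, so a whole set of levels
	 * can be tested with a single AND. */
	#define LEVEL_PROFILE_STANDARD	0x10
	#define LEVEL_PROFILE_MIN_SCLK	0x20
	#define LEVEL_PROFILE_MIN_MCLK	0x40
	#define LEVEL_PROFILE_PEAK	0x80

	#define PROFILE_MODE_MASK (LEVEL_PROFILE_STANDARD | \
				   LEVEL_PROFILE_MIN_SCLK | \
				   LEVEL_PROFILE_MIN_MCLK | \
				   LEVEL_PROFILE_PEAK)

	/* Entering profile mode is "old level outside the mask, new level
	 * inside": that is the moment to save the old level and ungate GFX
	 * clockgating; the mirror-image test restores both on the way out. */
	static int enters_profile_mode(unsigned int old_level, unsigned int new_level)
	{
		return !(old_level & PROFILE_MODE_MASK) &&
			(new_level & PROFILE_MODE_MASK);
	}
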
+ 14 - 1
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h

@@ -64,7 +64,9 @@ enum {
 	GNLD_FW_CTF,
 	GNLD_LED_DISPLAY,
 	GNLD_FAN_CONTROL,
-	GNLD_VOLTAGE_CONTROLLER,
+	GNLD_FEATURE_FAST_PPT_BIT,
+	GNLD_DIDT,
+	GNLD_ACG,
 	GNLD_FEATURES_MAX
 };
 
@@ -230,7 +232,9 @@ struct vega10_registry_data {
 	uint8_t   cac_support;
 	uint8_t   clock_stretcher_support;
 	uint8_t   db_ramping_support;
+	uint8_t   didt_mode;
 	uint8_t   didt_support;
+	uint8_t   edc_didt_support;
 	uint8_t   dynamic_state_patching_support;
 	uint8_t   enable_pkg_pwr_tracking_feature;
 	uint8_t   enable_tdc_limit_feature;
@@ -263,6 +267,9 @@ struct vega10_registry_data {
 	uint8_t   tcp_ramping_support;
 	uint8_t   tdc_support;
 	uint8_t   td_ramping_support;
+	uint8_t   dbr_ramping_support;
+	uint8_t   gc_didt_support;
+	uint8_t   psm_didt_support;
 	uint8_t   thermal_out_gpio_support;
 	uint8_t   thermal_support;
 	uint8_t   fw_ctf_enabled;
@@ -381,6 +388,8 @@ struct vega10_hwmgr {
 	struct vega10_smc_state_table  smc_state_table;
 
 	uint32_t                       config_telemetry;
+	uint32_t                       smu_version;
+	uint32_t                       acg_loop_state;
 };
 
 #define VEGA10_DPM2_NEAR_TDP_DEC                      10
@@ -425,6 +434,10 @@ struct vega10_hwmgr {
 #define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT      25 /* 10% * 255 = 25 */
 #define PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT  25 /* 10% * 255 = 25 */
 
+#define VEGA10_UMD_PSTATE_GFXCLK_LEVEL         0x3
+#define VEGA10_UMD_PSTATE_SOCCLK_LEVEL         0x3
+#define VEGA10_UMD_PSTATE_MCLK_LEVEL           0x2
+
 extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
 extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
 extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);

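The vega10_powertune.c diff below is dominated by sentinel-terminated register tables: each row pairs an indirect-register offset with a mask/shift/value triple, and 0xFFFFFFFF in the offset column ends the list. A minimal sketch of the read-modify-write walker such tables imply; read_ind()/write_ind() are hypothetical stand-ins for the CGS indirect-register accessors:

	#include <stdint.h>

	struct didt_config_reg {
		uint32_t offset;	/* indirect register offset (ixDIDT_*) */
		uint32_t mask;		/* field mask within that register */
		uint32_t shift;		/* field shift within that register */
		uint32_t value;		/* field value to program */
	};

	uint32_t read_ind(uint32_t offset);		/* hypothetical stand-ins */
	void write_ind(uint32_t offset, uint32_t data);	/* for the CGS accessors */

	static void apply_config_regs(const struct didt_config_reg *regs)
	{
		/* 0xFFFFFFFF in the offset column is the end-of-list sentinel. */
		for (; regs->offset != 0xFFFFFFFF; regs++) {
			uint32_t data = read_ind(regs->offset);

			data &= ~regs->mask;
			data |= (regs->value << regs->shift) & regs->mask;
			write_ind(regs->offset, data);
		}
	}
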
+ 1291 - 0
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c

@@ -26,7 +26,1298 @@
 #include "vega10_powertune.h"
 #include "vega10_smumgr.h"
 #include "vega10_ppsmc.h"
+#include "vega10_inc.h"
 #include "pp_debug.h"
+#include "pp_soc15.h"
+
+static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ */
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,        DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,        0x3853 },
+	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,        DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,        0x3153 },
+
+	/* DIDT_TD */
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,        DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,        0x0dde },
+	{   ixDIDT_TD_TUNING_CTRL,             DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,        DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,        0x0dde },
+
+	/* DIDT_TCP */
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,       0x3dde },
+	{   ixDIDT_TCP_TUNING_CTRL,            DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,       0x3dde },
+
+	/* DIDT_DB */
+	{   ixDIDT_DB_TUNING_CTRL,             DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,        DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,        0x3dde },
+	{   ixDIDT_DB_TUNING_CTRL,             DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,        DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,        0x3dde },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset               Mask                                                     Shift                                                            Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/*DIDT_SQ_CTRL3 */
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK,       DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK,       DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK,       DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT,             0x0003 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK,       DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK,       DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK,       DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT,             0x0003 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK,       DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK,       DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK,       DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK,       DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK,       DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT,             0x0000 },
+	{   ixDIDT_SQ_CTRL3,     DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK,       DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT,             0x0000 },
+
+	/*DIDT_TCP_CTRL3 */
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK,      DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK,      DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK,      DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT,            0x0003 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK,      DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK,      DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK,      DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT,            0x0003 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK,      DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK,      DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK,      DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK,      DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK,      DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT,            0x0000 },
+	{   ixDIDT_TCP_CTRL3,    DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK,      DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT,            0x0000 },
+
+	/*DIDT_TD_CTRL3 */
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK,       DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK,       DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__THROTTLE_POLICY_MASK,       DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT,             0x0003 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK,       DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK,       DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK,       DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT,             0x0003 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK,       DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK,       DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK,       DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK,       DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK,       DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT,             0x0000 },
+	{   ixDIDT_TD_CTRL3,     DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK,       DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT,             0x0000 },
+
+	/*DIDT_DB_CTRL3 */
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK,       DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK,       DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__THROTTLE_POLICY_MASK,       DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT,             0x0003 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK,       DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK,       DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK,       DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT,             0x0003 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK,       DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK,       DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK,       DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK,       DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK,       DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT,             0x0000 },
+	{   ixDIDT_DB_CTRL3,     DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK,       DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT,             0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                            Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ */
+	{   ixDIDT_SQ_CTRL2,                  DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                 DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,                 0x3853 },
+	{   ixDIDT_SQ_CTRL2,                  DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,        DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,        0x00c0 },
+	{   ixDIDT_SQ_CTRL2,                  DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,        DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,        0x0000 },
+
+	/* DIDT_TD */
+	{   ixDIDT_TD_CTRL2,                  DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                 DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,                 0x3fff },
+	{   ixDIDT_TD_CTRL2,                  DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,        DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,        0x00c0 },
+	{   ixDIDT_TD_CTRL2,                  DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,        DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,        0x0001 },
+
+	/* DIDT_TCP */
+	{   ixDIDT_TCP_CTRL2,                 DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT,                0x3dde },
+	{   ixDIDT_TCP_CTRL2,                 DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,       0x00c0 },
+	{   ixDIDT_TCP_CTRL2,                 DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,       0x0001 },
+
+	/* DIDT_DB */
+	{   ixDIDT_DB_CTRL2,                  DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK,                 DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT,                 0x3dde },
+	{   ixDIDT_DB_CTRL2,                  DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,        DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,        0x00c0 },
+	{   ixDIDT_DB_CTRL2,                  DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,        DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,        0x0001 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ */
+	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MIN_POWER_MASK,                       DIDT_SQ_CTRL1__MIN_POWER__SHIFT,                       0x0000 },
+	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                       DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                       0xffff },
+	/* DIDT_TD */
+	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                       DIDT_TD_CTRL1__MIN_POWER__SHIFT,                       0x0000 },
+	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                       DIDT_TD_CTRL1__MAX_POWER__SHIFT,                       0xffff },
+	/* DIDT_TCP */
+	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MIN_POWER_MASK,                      DIDT_TCP_CTRL1__MIN_POWER__SHIFT,                      0x0000 },
+	{   ixDIDT_TCP_CTRL1,                  DIDT_TCP_CTRL1__MAX_POWER_MASK,                      DIDT_TCP_CTRL1__MAX_POWER__SHIFT,                      0xffff },
+	/* DIDT_DB */
+	{   ixDIDT_DB_CTRL1,                   DIDT_DB_CTRL1__MIN_POWER_MASK,                       DIDT_DB_CTRL1__MIN_POWER__SHIFT,                       0x0000 },
+	{   ixDIDT_DB_CTRL1,                   DIDT_DB_CTRL1__MAX_POWER_MASK,                       DIDT_DB_CTRL1__MAX_POWER__SHIFT,                       0xffff },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                  Shift                                                 Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ */
+	{   ixDIDT_SQ_WEIGHT0_3,               0xFFFFFFFF,                                           0,                                                    0x2B363B1A },
+	{   ixDIDT_SQ_WEIGHT4_7,               0xFFFFFFFF,                                           0,                                                    0x270B2432 },
+	{   ixDIDT_SQ_WEIGHT8_11,              0xFFFFFFFF,                                           0,                                                    0x00000018 },
+
+	/* DIDT_TD */
+	{   ixDIDT_TD_WEIGHT0_3,               0xFFFFFFFF,                                           0,                                                    0x2B1D220F },
+	{   ixDIDT_TD_WEIGHT4_7,               0xFFFFFFFF,                                           0,                                                    0x00007558 },
+	{   ixDIDT_TD_WEIGHT8_11,              0xFFFFFFFF,                                           0,                                                    0x00000000 },
+
+	/* DIDT_TCP */
+	{   ixDIDT_TCP_WEIGHT0_3,               0xFFFFFFFF,                                          0,                                                    0x5ACE160D },
+	{   ixDIDT_TCP_WEIGHT4_7,               0xFFFFFFFF,                                          0,                                                    0x00000000 },
+	{   ixDIDT_TCP_WEIGHT8_11,              0xFFFFFFFF,                                          0,                                                    0x00000000 },
+
+	/* DIDT_DB */
+	{   ixDIDT_DB_WEIGHT0_3,                0xFFFFFFFF,                                          0,                                                    0x0E152A0F },
+	{   ixDIDT_DB_WEIGHT4_7,                0xFFFFFFFF,                                          0,                                                    0x09061813 },
+	{   ixDIDT_DB_WEIGHT8_11,               0xFFFFFFFF,                                          0,                                                    0x00000013 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ */
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,  0x0000 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,  0x0000 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,   DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,  0x0000 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,  0x0000 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK,   DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT,  0x0001 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK,   DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT,  0x0001 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK,   DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT,  0x0001 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT,  0xffff },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK,   DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT,  0x0000 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK,   DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT,  0x0000 },
+	{  ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK,   DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT,  0x0000 },
+	/* DIDT_TD */
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK,   DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT,  0x0000 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__PHASE_OFFSET_MASK,   DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT,  0x0000 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK,   DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT,  0x0000 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,   DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,  0x0000 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK,   DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT,  0x0001 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK,   DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT,  0x0001 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK,   DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT,  0x0001 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT,  0xffff },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK,   DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT,  0x0000 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK,   DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT,  0x0000 },
+	{  ixDIDT_TD_CTRL0,                   DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK,   DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT,  0x0000 },
+	/* DIDT_TCP */
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK,  DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__PHASE_OFFSET_MASK,  DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK,  DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,  DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK,  DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK,  DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK,  DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK,  DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK,  DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK,  DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+	{  ixDIDT_TCP_CTRL0,                  DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK,  DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+	/* DIDT_DB */
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK,   DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT,  0x0000 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__PHASE_OFFSET_MASK,   DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT,  0x0000 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK,   DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT,  0x0000 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,   DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,  0x0000 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK,   DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT,  0x0001 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK,   DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT,  0x0001 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK,   DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT,  0x0001 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT,  0xffff },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK,   DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT,  0x0000 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK,   DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT,  0x0000 },
+	{  ixDIDT_DB_CTRL0,                   DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK,   DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT,  0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                   Mask                                                     Shift                                                      Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ */
+	{   ixDIDT_SQ_STALL_CTRL,    DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0004 },
+	{   ixDIDT_SQ_STALL_CTRL,    DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0004 },
+	{   ixDIDT_SQ_STALL_CTRL,    DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,     0x000a },
+	{   ixDIDT_SQ_STALL_CTRL,    DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,     0x000a },
+
+	/* DIDT_TD */
+	{   ixDIDT_TD_STALL_CTRL,    DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001 },
+	{   ixDIDT_TD_STALL_CTRL,    DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,    DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001 },
+	{   ixDIDT_TD_STALL_CTRL,    DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,     0x000a },
+	{   ixDIDT_TD_STALL_CTRL,    DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,     0x000a },
+
+	/* DIDT_TCP */
+	{   ixDIDT_TCP_STALL_CTRL,   DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,    0x0001 },
+	{   ixDIDT_TCP_STALL_CTRL,   DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,   DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,    0x0001 },
+	{   ixDIDT_TCP_STALL_CTRL,   DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK,   DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,    0x000a },
+	{   ixDIDT_TCP_STALL_CTRL,   DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK,   DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,    0x000a },
+
+	/* DIDT_DB */
+	{   ixDIDT_DB_STALL_CTRL,    DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,    DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0004 },
+	{   ixDIDT_DB_STALL_CTRL,    DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,    DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0004 },
+	{   ixDIDT_DB_STALL_CTRL,    DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK,    DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,     0x000a },
+	{   ixDIDT_DB_STALL_CTRL,    DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK,    DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,     0x000a },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                        Mask                                                      Shift                                                    Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* DIDT_SQ_STALL_PATTERN_1_2 */
+	{   ixDIDT_SQ_STALL_PATTERN_1_2,  DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK,    DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT,  0x0001 },
+	{   ixDIDT_SQ_STALL_PATTERN_1_2,  DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK,    DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT,  0x0001 },
+
+	/* DIDT_SQ_STALL_PATTERN_3_4 */
+	{   ixDIDT_SQ_STALL_PATTERN_3_4,  DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK,    DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT,  0x0001 },
+	{   ixDIDT_SQ_STALL_PATTERN_3_4,  DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK,    DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT,  0x0001 },
+
+	/* DIDT_SQ_STALL_PATTERN_5_6 */
+	{   ixDIDT_SQ_STALL_PATTERN_5_6,  DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK,    DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_STALL_PATTERN_5_6,  DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK,    DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT,  0x0000 },
+
+	/* DIDT_SQ_STALL_PATTERN_7 */
+	{   ixDIDT_SQ_STALL_PATTERN_7,    DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK,      DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT,    0x0000 },
+
+	/* DIDT_TCP_STALL_PATTERN_1_2 */
+	{   ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK,   DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+	{   ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK,   DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+	/* DIDT_TCP_STALL_PATTERN_3_4 */
+	{   ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK,   DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+	{   ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK,   DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+	/* DIDT_TCP_STALL_PATTERN_5_6 */
+	{   ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK,   DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+	{   ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK,   DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+	/* DIDT_TCP_STALL_PATTERN_7 */
+	{   ixDIDT_TCP_STALL_PATTERN_7,   DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK,     DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT,   0x0000 },
+
+	/* DIDT_TD_STALL_PATTERN_1_2 */
+	{   ixDIDT_TD_STALL_PATTERN_1_2,  DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK,    DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT,  0x0001 },
+	{   ixDIDT_TD_STALL_PATTERN_1_2,  DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK,    DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT,  0x0001 },
+
+	/* DIDT_TD_STALL_PATTERN_3_4 */
+	{   ixDIDT_TD_STALL_PATTERN_3_4,  DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK,    DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT,  0x0001 },
+	{   ixDIDT_TD_STALL_PATTERN_3_4,  DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK,    DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT,  0x0001 },
+
+	/* DIDT_TD_STALL_PATTERN_5_6 */
+	{   ixDIDT_TD_STALL_PATTERN_5_6,  DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK,    DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT,  0x0000 },
+	{   ixDIDT_TD_STALL_PATTERN_5_6,  DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK,    DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT,  0x0000 },
+
+	/* DIDT_TD_STALL_PATTERN_7 */
+	{   ixDIDT_TD_STALL_PATTERN_7,    DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK,      DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT,    0x0000 },
+
+	/* DIDT_DB_STALL_PATTERN_1_2 */
+	{   ixDIDT_DB_STALL_PATTERN_1_2,  DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK,    DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT,  0x0001 },
+	{   ixDIDT_DB_STALL_PATTERN_1_2,  DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK,    DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT,  0x0001 },
+
+	/* DIDT_DB_STALL_PATTERN_3_4 */
+	{   ixDIDT_DB_STALL_PATTERN_3_4,  DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK,    DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT,  0x0001 },
+	{   ixDIDT_DB_STALL_PATTERN_3_4,  DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK,    DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT,  0x0001 },
+
+	/* DIDT_DB_STALL_PATTERN_5_6 */
+	{   ixDIDT_DB_STALL_PATTERN_5_6,  DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK,    DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT,  0x0000 },
+	{   ixDIDT_DB_STALL_PATTERN_5_6,  DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK,    DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT,  0x0000 },
+
+	/* DIDT_DB_STALL_PATTERN_7 */
+	{   ixDIDT_DB_STALL_PATTERN_7,    DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK,      DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT,    0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x00060021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x00860021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x01060021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x01860021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x02060021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x02860021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x03060021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x03860021 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x04060021 },
+	/* TD */
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x000E0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x008E0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x010E0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x018E0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x020E0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x028E0020 },
+	/* TCP */
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x001c0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x009c0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x011c0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x019c0020 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x021c0020 },
+	/* DB */
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x00200008 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x00820008 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x01020008 },
+	{   ixSE_CAC_CNTL,                     0xFFFFFFFF,                                          0,                                                     0x01820008 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_1_2,   0xFFFFFFFF,                                          0,                                                     0x00030001 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_3_4,   0xFFFFFFFF,                                          0,                                                     0x000F0007 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_5_6,   0xFFFFFFFF,                                          0,                                                     0x003F001F },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_7,     0xFFFFFFFF,                                          0,                                                     0x0000007F },
+	/* TD */
+	{   ixDIDT_TD_EDC_STALL_PATTERN_1_2,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_PATTERN_3_4,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_PATTERN_5_6,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_PATTERN_7,     0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	/* TCP */
+	{   ixDIDT_TCP_EDC_STALL_PATTERN_1_2,   0xFFFFFFFF,                                         0,                                                     0x00000000 },
+	{   ixDIDT_TCP_EDC_STALL_PATTERN_3_4,   0xFFFFFFFF,                                         0,                                                     0x00000000 },
+	{   ixDIDT_TCP_EDC_STALL_PATTERN_5_6,   0xFFFFFFFF,                                         0,                                                     0x00000000 },
+	{   ixDIDT_TCP_EDC_STALL_PATTERN_7,     0xFFFFFFFF,                                         0,                                                     0x00000000 },
+	/* DB */
+	{   ixDIDT_DB_EDC_STALL_PATTERN_1_2,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_DB_EDC_STALL_PATTERN_3_4,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_DB_EDC_STALL_PATTERN_5_6,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_DB_EDC_STALL_PATTERN_7,     0xFFFFFFFF,                                          0,                                                     0x00000000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_1_2,   0xFFFFFFFF,                                          0,                                                     0x00000015 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_3_4,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_5_6,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_7,     0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	/* TD */
+	{   ixDIDT_TD_EDC_STALL_PATTERN_1_2,   0xFFFFFFFF,                                          0,                                                     0x00000015 },
+	{   ixDIDT_TD_EDC_STALL_PATTERN_3_4,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_PATTERN_5_6,   0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_PATTERN_7,     0xFFFFFFFF,                                          0,                                                     0x00000000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixDIDT_SQ_EDC_STALL_DELAY_1,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_2,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_3,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_4,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	/* TD */
+	{   ixDIDT_TD_EDC_STALL_DELAY_1,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_DELAY_2,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_DELAY_3,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TD_EDC_STALL_DELAY_4,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	/* TCP */
+	{   ixDIDT_TCP_EDC_STALL_DELAY_1,      0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TCP_EDC_STALL_DELAY_2,      0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TCP_EDC_STALL_DELAY_3,      0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	{   ixDIDT_TCP_EDC_STALL_DELAY_4,      0xFFFFFFFF,                                          0,                                                     0x00000000 },
+	/* DB */
+	{   ixDIDT_DB_EDC_STALL_DELAY_1,       0xFFFFFFFF,                                          0,                                                     0x00000000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   ixDIDT_SQ_EDC_THRESHOLD,           0xFFFFFFFF,                                          0,                                                     0x0000010E },
+	{   ixDIDT_TD_EDC_THRESHOLD,           0xFFFFFFFF,                                          0,                                                     0xFFFFFFFF },
+	{   ixDIDT_TCP_EDC_THRESHOLD,          0xFFFFFFFF,                                          0,                                                     0xFFFFFFFF },
+	{   ixDIDT_DB_EDC_THRESHOLD,           0xFFFFFFFF,                                          0,                                                     0xFFFFFFFF },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_EN_MASK,                       DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT,                        0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK,                   DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT,                    0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,          DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK,              DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT,               0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,  DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,   0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK,   DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT,    0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,     DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,      0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK,                    DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT,                     0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK,          DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_EN_MASK,                       DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT,                        0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK,                   DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT,                    0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,          DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK,              DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT,               0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,  DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,   0x0004 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK,   DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT,    0x0006 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,     DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,      0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK,                    DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT,                     0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK,          DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT,          0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ */
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_EN_MASK,                       DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT,                        0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK,                   DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT,                    0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,          DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK,              DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT,               0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,  DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,   0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK,   DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT,    0x000C },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,     DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,      0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK,                    DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT,                     0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK,          DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT,          0x0001 },
+
+	/* TD */
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_EN_MASK,                       DIDT_TD_EDC_CTRL__EDC_EN__SHIFT,                        0x0001 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK,                   DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT,                    0x0000 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,          DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,           0x0000 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK,              DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT,               0x0001 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,  DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,   0x0001 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK,   DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT,    0x000E },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,     DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,      0x0000 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK,                    DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT,                     0x0000 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK,          DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT,           0x0000 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK,         DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+	{   ixDIDT_TD_EDC_CTRL,                DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK,         DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT,          0x0001 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg    GCDiDtDroopCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   mmGC_DIDT_DROOP_CTRL,              GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK,        GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT,         0x0000 },
+	{   mmGC_DIDT_DROOP_CTRL,              GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK,       GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT,        0x0000 },
+	{   mmGC_DIDT_DROOP_CTRL,              GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK,     GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT,      0x0000 },
+	{   mmGC_DIDT_DROOP_CTRL,              GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK,             GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT,              0x0000 },
+	{   mmGC_DIDT_DROOP_CTRL,              GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK,  GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT,   0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg    GCDiDtCtrl0Config_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   mmGC_DIDT_CTRL0,                   GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK,                    GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT,                     0x0000 },
+	{   mmGC_DIDT_CTRL0,                   GC_DIDT_CTRL0__PHASE_OFFSET_MASK,                    GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT,                     0x0000 },
+	{   mmGC_DIDT_CTRL0,                   GC_DIDT_CTRL0__DIDT_SW_RST_MASK,                     GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT,                      0x0000 },
+	{   mmGC_DIDT_CTRL0,                   GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,            GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,             0x0000 },
+	{   mmGC_DIDT_CTRL0,                   GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK,    GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT,     0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMSEEDCStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ EDC STALL PATTERNs */
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_1_2,  DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK,   DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT,   0x0101 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_1_2,  DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK,   DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT,   0x0101 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_3_4,  DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK,   DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT,   0x1111 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_3_4,  DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK,   DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT,   0x1111 },
+
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_5_6,  DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK,   DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT,   0x1515 },
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_5_6,  DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK,   DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT,   0x1515 },
+
+	{   ixDIDT_SQ_EDC_STALL_PATTERN_7,  DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK,   DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT,     0x5555 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMSEEDCStallDelayConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ EDC STALL DELAYs */
+	{   ixDIDT_SQ_EDC_STALL_DELAY_1,      DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK,  DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_1,      DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK,  DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_1,      DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK,  DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_1,      DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK,  DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT,  0x0000 },
+
+	{   ixDIDT_SQ_EDC_STALL_DELAY_2,      DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK,  DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_2,      DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK,  DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_2,      DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK,  DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_2,      DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK,  DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT,  0x0000 },
+
+	{   ixDIDT_SQ_EDC_STALL_DELAY_3,      DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK,  DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_3,      DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK,  DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT,  0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_3,      DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT, 0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_3,      DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11__SHIFT, 0x0000 },
+
+	{   ixDIDT_SQ_EDC_STALL_DELAY_4,      DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12__SHIFT, 0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_4,      DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13__SHIFT, 0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_4,      DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14__SHIFT, 0x0000 },
+	{   ixDIDT_SQ_EDC_STALL_DELAY_4,      DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15__SHIFT, 0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMSEEDCThresholdConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ EDC THRESHOLD */
+	{   ixDIDT_SQ_EDC_THRESHOLD,           DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK,           DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT,            0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMSEEDCCtrlResetConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ EDC CTRL */
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_EN_MASK,                       DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT,                        0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK,                   DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT,                    0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,          DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK,              DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT,               0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,  DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,   0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK,   DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT,    0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,     DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,      0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK,                    DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT,                     0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK,          DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMSEEDCCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	/* SQ EDC CTRL */
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_EN_MASK,                       DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT,                        0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK,                   DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT,                    0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,          DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,           0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK,              DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT,               0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,  DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,   0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK,   DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT,    0x000E },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,     DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,      0x0000 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK,                    DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT,                     0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK,          DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT,           0x0003 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT,          0x0001 },
+	{   ixDIDT_SQ_EDC_CTRL,                DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK,         DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT,          0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMGCEDCThresholdConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   mmGC_EDC_THRESHOLD,                GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK,                GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT,                 0x00000000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMGCEDCDroopCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   mmGC_EDC_DROOP_CTRL,               GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK,          GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT,           0x0001 },
+	{   mmGC_EDC_DROOP_CTRL,               GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK,         GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT,          0x0384 },
+	{   mmGC_EDC_DROOP_CTRL,               GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK,       GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT,        0x0001 },
+	{   mmGC_EDC_DROOP_CTRL,               GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK,                 GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT,                  0x0001 },
+	{   mmGC_EDC_DROOP_CTRL,               GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK,               GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT,                0x0001 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMGCEDCCtrlResetConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_EN_MASK,                            GC_EDC_CTRL__EDC_EN__SHIFT,                             0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_SW_RST_MASK,                        GC_EDC_CTRL__EDC_SW_RST__SHIFT,                         0x0001 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,               GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,                0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_FORCE_STALL_MASK,                   GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT,                    0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,       GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,        0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,          GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,           0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg   PSMGCEDCCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_EN_MASK,                            GC_EDC_CTRL__EDC_EN__SHIFT,                             0x0001 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_SW_RST_MASK,                        GC_EDC_CTRL__EDC_SW_RST__SHIFT,                         0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK,               GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT,                0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_FORCE_STALL_MASK,                   GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT,                    0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK,       GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT,        0x0000 },
+	{   mmGC_EDC_CTRL,                     GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK,          GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT,           0x0000 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
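+/*
+ * Note: the two AVFS PSM tables below address registers by raw offset
+ * (0x16A01-0x16A06) rather than through named ix-/mm-style macros.
+ */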
+static const struct vega10_didt_config_reg    AvfsPSMResetConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   0x16A02,                         0xFFFFFFFF,                                            0x0,                                                    0x0000005F },
+	{   0x16A05,                         0xFFFFFFFF,                                            0x0,                                                    0x00000001 },
+	{   0x16A06,                         0x00000001,                                            0x0,                                                    0x02000000 },
+	{   0x16A01,                         0xFFFFFFFF,                                            0x0,                                                    0x00003027 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
+static const struct vega10_didt_config_reg    AvfsPSMInitConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ *      Offset                             Mask                                                 Shift                                                  Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+	{   0x16A05,                         0xFFFFFFFF,                                            0x18,                                                    0x00000001 },
+	{   0x16A05,                         0xFFFFFFFF,                                            0x8,                                                     0x00000003 },
+	{   0x16A05,                         0xFFFFFFFF,                                            0xa,                                                     0x00000006 },
+	{   0x16A05,                         0xFFFFFFFF,                                            0x7,                                                     0x00000000 },
+	{   0x16A06,                         0xFFFFFFFF,                                            0x18,                                                    0x00000001 },
+	{   0x16A06,                         0xFFFFFFFF,                                            0x19,                                                    0x00000001 },
+	{   0x16A01,                         0xFFFFFFFF,                                            0x0,                                                     0x00003027 },
+
+	{   0xFFFFFFFF  }  /* End of list */
+};
+
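+/*
+ * Walk a config table (terminated by an offset of 0xFFFFFFFF) and apply
+ * each entry as a read-modify-write against the indirect register space
+ * selected by reg_type (DIDT, GC CAC or SE CAC).
+ */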
+static int vega10_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs, enum vega10_didt_config_reg_type reg_type)
+{
+	uint32_t data;
+
+	PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega10_program_didt_config_registers] Invalid config register table!", return -EINVAL);
+
+	while (config_regs->offset != 0xFFFFFFFF) {
+		switch (reg_type) {
+		case VEGA10_CONFIGREG_DIDT:
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+			data &= ~config_regs->mask;
+			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+			break;
+		case VEGA10_CONFIGREG_GCCAC:
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+			data &= ~config_regs->mask;
+			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+			break;
+		case VEGA10_CONFIGREG_SECAC:
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
+			data &= ~config_regs->mask;
+			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		config_regs++;
+	}
+
+	return 0;
+}
+
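+/*
+ * Same read-modify-write walk as above, but over directly addressed
+ * registers instead of an indirect register space.
+ */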
+static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs)
+{
+	uint32_t data;
+
+	while (config_regs->offset != 0xFFFFFFFF) {
+		data = cgs_read_register(hwmgr->device, config_regs->offset);
+		data &= ~config_regs->mask;
+		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+		cgs_write_register(hwmgr->device, config_regs->offset, data);
+		config_regs++;
+	}
+
+	return 0;
+}
+
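+/*
+ * Flip DIDT_CTRL_EN for every block (SQ/DB/TD/TCP/DBR) whose ramping
+ * cap is set; when the DiDtEDCEnable cap is also set, toggle EDC_EN and
+ * hold EDC_SW_RST in the opposite state.  On enable, the accumulated
+ * block mask is reported to the SMC via PPSMC_MSG_ConfigureGfxDidt.
+ */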
+static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
+{
+	uint32_t data;
+	int result;
+	uint32_t en = (enable ? 1 : 0);
+	uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+		data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+		data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+		didt_block_info &= ~SQ_Enable_MASK;
+		didt_block_info |= en << SQ_Enable_SHIFT;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+		data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+		data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+		didt_block_info &= ~DB_Enable_MASK;
+		didt_block_info |= en << DB_Enable_SHIFT;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+		data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+		data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+		didt_block_info &= ~TD_Enable_MASK;
+		didt_block_info |= en << TD_Enable_SHIFT;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+		data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+		data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+		didt_block_info &= ~TCP_Enable_MASK;
+		didt_block_info |= en << TCP_Enable_SHIFT;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+		data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0);
+		data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK;
+		data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) {
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
+			data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK;
+			data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK);
+			data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK;
+			data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
+		}
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
+			data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK;
+			data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK);
+			data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK;
+			data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
+		}
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
+			data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK;
+			data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK);
+			data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK;
+			data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
+		}
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
+			data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK;
+			data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK);
+			data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK;
+			data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
+		}
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+			data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
+			data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK;
+			data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK);
+			data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK;
+			data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK);
+			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
+		}
+	}
+
+	if (enable) {
+		/* For Vega10, SMC does not support any mask yet. */
+		result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+		PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
+	}
+}
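
/*
 * A minimal standalone sketch of the masked read-modify-write idiom used for
 * every DIDT block above: read the indirect register, clear the field via its
 * _MASK, or in the shifted enable bit, and write it back. The register name,
 * values, and accessors below are illustrative stand-ins, not the real Vega10
 * definitions or CGS calls.
 */
#include <stdint.h>
#include <stdio.h>

#define ixDIDT_SQ_CTRL0                     0x0000
#define DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK    0x00000001
#define DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT  0

static uint32_t fake_reg;  /* stands in for the indirect register file */

static uint32_t read_ind(uint32_t reg)              { (void)reg; return fake_reg; }
static void     write_ind(uint32_t reg, uint32_t v) { (void)reg; fake_reg = v; }

/* One masked field update, as done per block in vega10_didt_set_mask(). */
static void set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t en)
{
	uint32_t data = read_ind(reg);

	data &= ~mask;
	data |= (en << shift) & mask;
	write_ind(reg, data);
}

int main(void)
{
	set_field(ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,
		  DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 1);
	printf("DIDT_SQ_CTRL0 = 0x%08x\n", (unsigned)fake_reg);  /* 0x00000001 */
	return 0;
}
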
+
+static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	uint32_t num_se = 0, count, data;
+	struct cgs_system_info sys_info = {0};
+	uint32_t reg;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+	if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+		num_se = sys_info.value;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	cgs_lock_grbm_idx(hwmgr->device, true);
+	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+	for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		cgs_write_register(hwmgr->device, reg, data);
+
+		result =  vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SELCacConfig_Vega10, VEGA10_CONFIGREG_SECAC);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT);
+
+		if (0 != result)
+			break;
+	}
+	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	cgs_lock_grbm_idx(hwmgr->device, false);
+
+	vega10_didt_set_mask(hwmgr, true);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	return 0;
+}
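
/*
 * The function above, like the other per-SE paths below, steers
 * GRBM_GFX_INDEX at one shader engine at a time (SH and instance broadcast
 * set), programs that SE, then writes 0xE0000000 to restore full broadcast.
 * A condensed sketch of that loop shape; the field layout is illustrative
 * and the CGS register write is stubbed out.
 */
#include <stdint.h>
#include <stdio.h>

#define SE_INDEX__SHIFT                  16
#define SH_BROADCAST_WRITES_MASK         (1u << 29)
#define INSTANCE_BROADCAST_WRITES_MASK   (1u << 30)
#define GRBM_BROADCAST_ALL               0xE0000000u  /* SE + SH + instance */

static void write_grbm_gfx_index(uint32_t v)
{
	printf("GRBM_GFX_INDEX <- 0x%08x\n", (unsigned)v);
}

static int program_one_se(uint32_t se) { (void)se; return 0; }

int main(void)
{
	uint32_t num_se = 4, se;
	int result = 0;

	for (se = 0; se < num_se; se++) {
		write_grbm_gfx_index(INSTANCE_BROADCAST_WRITES_MASK |
				     SH_BROADCAST_WRITES_MASK |
				     (se << SE_INDEX__SHIFT));
		result = program_one_se(se);
		if (result)
			break;
	}
	write_grbm_gfx_index(GRBM_BROADCAST_ALL);  /* restore broadcast */
	return result;
}
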
+
+static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	vega10_didt_set_mask(hwmgr, false);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	return 0;
+}
+
+static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	uint32_t num_se = 0, count, data;
+	struct cgs_system_info sys_info = {0};
+	uint32_t reg;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+	if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+		num_se = sys_info.value;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	cgs_lock_grbm_idx(hwmgr->device, true);
+	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+	for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		cgs_write_register(hwmgr->device, reg, data);
+
+		result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT);
+		if (0 != result)
+			break;
+	}
+	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	cgs_lock_grbm_idx(hwmgr->device, false);
+
+	vega10_didt_set_mask(hwmgr, true);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC))
+		vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+		vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
+
+	return 0;
+}
+
+static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+	uint32_t data;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	vega10_didt_set_mask(hwmgr, false);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+		data = 0x00000000;
+		cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+		vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+	return 0;
+}
+
+static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	uint32_t num_se = 0, count, data;
+	struct cgs_system_info sys_info = {0};
+	uint32_t reg;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+	if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+		num_se = sys_info.value;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	cgs_lock_grbm_idx(hwmgr->device, true);
+	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+	for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		cgs_write_register(hwmgr->device, reg, data);
+		result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+
+		if (0 != result)
+			break;
+	}
+	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	cgs_lock_grbm_idx(hwmgr->device, false);
+
+	vega10_didt_set_mask(hwmgr, true);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	return 0;
+}
+
+static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	vega10_didt_set_mask(hwmgr, false);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	return 0;
+}
+
+static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+	int result;
+	uint32_t num_se = 0;
+	uint32_t count, data;
+	struct cgs_system_info sys_info = {0};
+	uint32_t reg;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+	if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+		num_se = sys_info.value;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+	cgs_lock_grbm_idx(hwmgr->device, true);
+	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+	for (count = 0; count < num_se; count++) {
+		data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+		cgs_write_register(hwmgr->device, reg, data);
+		result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+		result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+
+		if (0 != result)
+			break;
+	}
+	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	cgs_lock_grbm_idx(hwmgr->device, false);
+
+	vega10_didt_set_mask(hwmgr, true);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+		vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10);
+		vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+		vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
+
+	return 0;
+}
+
+static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+	uint32_t data;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	vega10_didt_set_mask(hwmgr, false);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+		data = 0x00000000;
+		cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+		vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+	return 0;
+}
+
+static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+	uint32_t reg;
+	int result;
+
+	cgs_enter_safe_mode(hwmgr->device, true);
+
+	cgs_lock_grbm_idx(hwmgr->device, true);
+	reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+	cgs_write_register(hwmgr->device, reg, 0xE0000000);
+	cgs_lock_grbm_idx(hwmgr->device, false);
+
+	result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+	result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+	if (0 != result)
+		return result;
+
+	vega10_didt_set_mask(hwmgr, true);
+
+	cgs_enter_safe_mode(hwmgr->device, false);
+
+	return 0;
+}
+
+static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+	int result;
+
+	result = vega10_disable_se_edc_config(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
+
+	return 0;
+}
+
+int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+	if (data->smu_features[GNLD_DIDT].supported) {
+		if (data->smu_features[GNLD_DIDT].enabled)
+			PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
+
+		switch (data->registry_data.didt_mode) {
+		case 0:
+			result = vega10_enable_cac_driving_se_didt_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
+			break;
+		case 2:
+			result = vega10_enable_psm_gc_didt_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
+			break;
+		case 3:
+			result = vega10_enable_se_edc_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
+			break;
+		case 1:
+		case 4:
+		case 5:
+			result = vega10_enable_psm_gc_edc_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 1/4/5 Failed!", return result);
+			break;
+		case 6:
+			result = vega10_enable_se_edc_force_stall_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
+			break;
+		default:
+			result = -EINVAL;
+			break;
+		}
+
+		if (0 == result) {
+			PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+				"[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
+			data->smu_features[GNLD_DIDT].enabled = true;
+		}
+	}
+
+	return result;
+}
+
+int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+	int result = 0;
+	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+	if (data->smu_features[GNLD_DIDT].supported) {
+		if (!data->smu_features[GNLD_DIDT].enabled)
+			PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
+
+		switch (data->registry_data.didt_mode) {
+		case 0:
+			result = vega10_disable_cac_driving_se_didt_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
+			break;
+		case 2:
+			result = vega10_disable_psm_gc_didt_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
+			break;
+		case 3:
+			result = vega10_disable_se_edc_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
+			break;
+		case 1:
+		case 4:
+		case 5:
+			result = vega10_disable_psm_gc_edc_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 1/4/5 Failed!", return result);
+			break;
+		case 6:
+			result = vega10_disable_se_edc_force_stall_config(hwmgr);
+			PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
+			break;
+		default:
+			result = -EINVAL;
+			break;
+		}
+
+		if (0 == result) {
+			PP_ASSERT_WITH_CODE((0 == vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+					"[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
+			data->smu_features[GNLD_DIDT].enabled = false;
+		}
+	}
+
+	return result;
+}
 
 void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
 {

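Both new entry points gate on GNLD_DIDT support, dispatch on registry_data.didt_mode, and flip the cached enabled flag only on success. A hypothetical caller, assuming nothing beyond the two prototypes (pp_hwmgr left opaque; no such call site exists in this diff), that runs a reconfiguration step with DiDt throttling off:

struct pp_hwmgr;
int vega10_enable_didt_config(struct pp_hwmgr *hwmgr);
int vega10_disable_didt_config(struct pp_hwmgr *hwmgr);

/* Hypothetical helper: disable DiDt, run the caller's step, re-enable. */
static int with_didt_disabled(struct pp_hwmgr *hwmgr,
			      int (*step)(struct pp_hwmgr *))
{
	int ret = vega10_disable_didt_config(hwmgr);

	if (ret)
		return ret;
	ret = step(hwmgr);
	vega10_enable_didt_config(hwmgr);  /* best-effort re-enable */
	return ret;
}
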
+ 16 - 0
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h

@@ -31,6 +31,12 @@ enum vega10_pt_config_reg_type {
 	VEGA10_CONFIGREG_MAX
 };
 
+enum vega10_didt_config_reg_type {
+	VEGA10_CONFIGREG_DIDT = 0,
+	VEGA10_CONFIGREG_GCCAC,
+	VEGA10_CONFIGREG_SECAC
+};
+
 /* PowerContainment Features */
 #define POWERCONTAINMENT_FEATURE_DTE             0x00000001
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
@@ -44,6 +50,13 @@ struct vega10_pt_config_reg {
 	enum vega10_pt_config_reg_type       type;
 };
 
+struct vega10_didt_config_reg {
+	uint32_t		offset;
+	uint32_t		mask;
+	uint32_t		shift;
+	uint32_t		value;
+};
+
 struct vega10_pt_defaults {
     uint8_t   SviLoadLineEn;
     uint8_t   SviLoadLineVddC;
@@ -62,5 +75,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
 int vega10_power_control_set_level(struct pp_hwmgr *hwmgr);
 int vega10_disable_power_containment(struct pp_hwmgr *hwmgr);
 
+int vega10_enable_didt_config(struct pp_hwmgr *hwmgr);
+int vega10_disable_didt_config(struct pp_hwmgr *hwmgr);
+
 #endif  /* _VEGA10_POWERTUNE_H_ */
 

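vega10_program_didt_config_registers(), called throughout the vega10_powertune.c hunk above, consumes arrays of this vega10_didt_config_reg record. Its body is not part of this diff; a plausible sketch of such a table walker, assuming each table ends in a zero-mask sentinel and applying every entry as a masked field write:

#include <stdint.h>

struct didt_config_reg {       /* mirrors vega10_didt_config_reg above */
	uint32_t offset;
	uint32_t mask;
	uint32_t shift;
	uint32_t value;
};

static uint32_t regs[256];     /* toy register file indexed by offset */

static int program_config_table(const struct didt_config_reg *tbl)
{
	for (; tbl->mask != 0; tbl++) {
		uint32_t data = regs[tbl->offset];

		data &= ~tbl->mask;
		data |= (tbl->value << tbl->shift) & tbl->mask;
		regs[tbl->offset] = data;
	}
	return 0;
}

static const struct didt_config_reg demo_table[] = {
	{ 0x10, 0x0000000f, 0, 0x5 },
	{ 0, 0, 0, 0 },            /* sentinel */
};

int main(void)
{
	return program_config_table(demo_table);
}
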
+ 80 - 8
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c

@@ -31,6 +31,8 @@
 #include "cgs_common.h"
 #include "vega10_pptable.h"
 
+#define NUM_DSPCLK_LEVELS 8
+
 static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
 		enum phm_platform_caps cap)
 {
@@ -644,11 +646,11 @@ static int get_gfxclk_voltage_dependency_table(
 	return 0;
 }
 
-static int get_dcefclk_voltage_dependency_table(
+static int get_pix_clk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		struct phm_ppt_v1_clock_voltage_dependency_table
 			**pp_vega10_clk_dep_table,
-		const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table)
+		const ATOM_Vega10_PIXCLK_Dependency_Table *clk_dep_table)
 {
 	uint32_t table_size, i;
 	struct phm_ppt_v1_clock_voltage_dependency_table
@@ -681,6 +683,76 @@ static int get_dcefclk_voltage_dependency_table(
 	return 0;
 }
 
+static int get_dcefclk_voltage_dependency_table(
+		struct pp_hwmgr *hwmgr,
+		struct phm_ppt_v1_clock_voltage_dependency_table
+			**pp_vega10_clk_dep_table,
+		const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table)
+{
+	uint32_t table_size, i;
+	uint8_t num_entries;
+	struct phm_ppt_v1_clock_voltage_dependency_table
+				*clk_table;
+	struct cgs_system_info sys_info = {0};
+	uint32_t dev_id;
+	uint32_t rev_id;
+
+	PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
+			"Invalid PowerPlay Table!", return -1);
+
+	/*
+	 * Workaround to add another DPM level for pioneer cards, since the
+	 * VBIOS is locked down. The extra level supports 3DP monitors at
+	 * 4K@120Hz.
+	 */
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	dev_id = (uint32_t)sys_info.value;
+
+	sys_info.size = sizeof(struct cgs_system_info);
+	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+	cgs_query_system_info(hwmgr->device, &sys_info);
+	rev_id = (uint32_t)sys_info.value;
+
+	if (dev_id == 0x6863 && rev_id == 0 &&
+		clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000)
+		num_entries = clk_dep_table->ucNumEntries + 1 > NUM_DSPCLK_LEVELS ?
+				NUM_DSPCLK_LEVELS : clk_dep_table->ucNumEntries + 1;
+	else
+		num_entries = clk_dep_table->ucNumEntries;
+
+	table_size = sizeof(uint32_t) +
+			sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
+			num_entries;
+
+	clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+			kzalloc(table_size, GFP_KERNEL);
+
+	if (!clk_table)
+		return -ENOMEM;
+
+	clk_table->count = (uint32_t)num_entries;
+
+	for (i = 0; i < clk_dep_table->ucNumEntries; i++) {
+		clk_table->entries[i].vddInd =
+				clk_dep_table->entries[i].ucVddInd;
+		clk_table->entries[i].clk =
+				le32_to_cpu(clk_dep_table->entries[i].ulClk);
+	}
+
+	if (i < num_entries) {
+		clk_table->entries[i].vddInd = clk_dep_table->entries[i-1].ucVddInd;
+		clk_table->entries[i].clk = 90000;
+	}
+
+	*pp_vega10_clk_dep_table = clk_table;
+
+	return 0;
+}
+
 static int get_pcie_table(struct pp_hwmgr *hwmgr,
 		struct phm_ppt_v1_pcie_table **vega10_pcie_table,
 		const Vega10_PPTable_Generic_SubTable_Header *table)
@@ -862,21 +934,21 @@ static int init_powerplay_extended_tables(
 				gfxclk_dep_table);
 
 	if (!result && powerplay_table->usPixclkDependencyTableOffset)
-		result = get_dcefclk_voltage_dependency_table(hwmgr,
+		result = get_pix_clk_voltage_dependency_table(hwmgr,
 				&pp_table_info->vdd_dep_on_pixclk,
-				(const ATOM_Vega10_DCEFCLK_Dependency_Table*)
+				(const ATOM_Vega10_PIXCLK_Dependency_Table*)
 				pixclk_dep_table);
 
 	if (!result && powerplay_table->usPhyClkDependencyTableOffset)
-		result = get_dcefclk_voltage_dependency_table(hwmgr,
+		result = get_pix_clk_voltage_dependency_table(hwmgr,
 				&pp_table_info->vdd_dep_on_phyclk,
-				(const ATOM_Vega10_DCEFCLK_Dependency_Table *)
+				(const ATOM_Vega10_PIXCLK_Dependency_Table *)
 				phyclk_dep_table);
 
 	if (!result && powerplay_table->usDispClkDependencyTableOffset)
-		result = get_dcefclk_voltage_dependency_table(hwmgr,
+		result = get_pix_clk_voltage_dependency_table(hwmgr,
 				&pp_table_info->vdd_dep_on_dispclk,
-				(const ATOM_Vega10_DCEFCLK_Dependency_Table *)
+				(const ATOM_Vega10_PIXCLK_Dependency_Table *)
 				dispclk_dep_table);
 
 	if (!result && powerplay_table->usDcefclkDependencyTableOffset)

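get_dcefclk_voltage_dependency_table() above sizes the table as sizeof(uint32_t) plus ucNumEntries records and, for dev_id 0x6863 rev 0, synthesizes one extra level at 90000 (900 MHz in 10 kHz units) that reuses the last entry's voltage index. The same append logic as a standalone sketch, with toy types in place of the phm_ppt_v1 structures:

#include <stdint.h>
#include <stdlib.h>

struct clk_record { uint8_t vddInd; uint32_t clk; };
struct clk_table  { uint32_t count; struct clk_record entries[]; };

/* Copy src[0..n) and, when the top level sits below top_clk, append one
 * synthesized level at top_clk reusing the last entry's voltage index. */
static struct clk_table *build_clk_table(const struct clk_record *src,
					 uint8_t n, uint32_t top_clk,
					 uint8_t max_levels)
{
	uint8_t count = (n && src[n - 1].clk < top_clk && n < max_levels)
			? n + 1 : n;
	struct clk_table *t = calloc(1, sizeof(*t) +
				     count * sizeof(t->entries[0]));
	uint8_t i;

	if (!t)
		return NULL;
	t->count = count;
	for (i = 0; i < n; i++)
		t->entries[i] = src[i];
	if (count > n) {
		t->entries[n].vddInd = src[n - 1].vddInd;
		t->entries[n].clk = top_clk;
	}
	return t;
}

int main(void)
{
	const struct clk_record src[] = { { 0, 30000 }, { 1, 60000 } };
	struct clk_table *t = build_clk_table(src, 2, 90000, 8);

	if (!t)
		return 1;
	/* t->count == 3; t->entries[2] == { 1, 90000 } */
	free(t);
	return 0;
}
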
+ 5 - 0
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h

@@ -164,9 +164,14 @@ enum phm_platform_caps {
 	PHM_PlatformCaps_EnablePlatformPowerManagement,         /* indicates that Platform Power Management feature is supported */
 	PHM_PlatformCaps_SurpriseRemoval,                       /* indicates that surprise removal feature is requested */
 	PHM_PlatformCaps_NewCACVoltage,                         /* indicates new CAC voltage table support */
+	PHM_PlatformCaps_DiDtSupport,                           /* for dI/dT feature */
 	PHM_PlatformCaps_DBRamping,                             /* for dI/dT feature */
 	PHM_PlatformCaps_TDRamping,                             /* for dI/dT feature */
 	PHM_PlatformCaps_TCPRamping,                            /* for dI/dT feature */
+	PHM_PlatformCaps_DBRRamping,                            /* for dI/dT feature */
+	PHM_PlatformCaps_DiDtEDCEnable,                         /* for dI/dT feature */
+	PHM_PlatformCaps_GCEDC,                                 /* for dI/dT feature */
+	PHM_PlatformCaps_PSM,                                   /* for dI/dT feature */
 	PHM_PlatformCaps_EnableSMU7ThermalManagement,           /* SMC will manage thermal events */
 	PHM_PlatformCaps_FPS,                                   /* FPS support */
 	PHM_PlatformCaps_ACP,                                   /* ACP support */

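Each new cap is an enum index into the platform-caps bitmap consulted by the phm_cap_enabled() checks in the DiDt code above. A pared-down sketch of that bitmap scheme (the real helpers live in the hwmgr core; sizes here are illustrative):

#include <stdint.h>
#include <stdbool.h>

#define PHM_MAX_NUM_CAPS  256
#define CAPS_PER_WORD     32

struct phm_caps { uint32_t bits[PHM_MAX_NUM_CAPS / CAPS_PER_WORD]; };

static void cap_set(struct phm_caps *c, unsigned int cap)
{
	c->bits[cap / CAPS_PER_WORD] |= 1u << (cap % CAPS_PER_WORD);
}

static bool cap_enabled(const struct phm_caps *c, unsigned int cap)
{
	return (c->bits[cap / CAPS_PER_WORD] >> (cap % CAPS_PER_WORD)) & 1u;
}

int main(void)
{
	struct phm_caps caps = { { 0 } };

	cap_set(&caps, 37);
	return cap_enabled(&caps, 37) ? 0 : 1;
}
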
+ 6 - 0
drivers/gpu/drm/amd/powerplay/inc/pp_debug.h

@@ -42,6 +42,12 @@
 		}				\
 	} while (0)
 
+#define PP_ASSERT(cond, msg)	\
+	do {					\
+		if (!(cond)) {			\
+			pr_warn("%s\n", msg);	\
+		}				\
+	} while (0)
 
 #define PP_DBG_LOG(fmt, ...) \
 	do { \

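The new PP_ASSERT is a warn-only sibling of the existing PP_ASSERT_WITH_CODE, which additionally runs recovery code when the condition fails. A standalone contrast of the two, with pr_warn mapped to printf for the sketch:

#include <stdio.h>

#define pr_warn printf

#define PP_ASSERT(cond, msg)			\
	do {					\
		if (!(cond))			\
			pr_warn("%s\n", msg);	\
	} while (0)

#define PP_ASSERT_WITH_CODE(cond, msg, code)	\
	do {					\
		if (!(cond)) {			\
			pr_warn("%s\n", msg);	\
			code;			\
		}				\
	} while (0)

static int demo(int result)
{
	PP_ASSERT(result == 0, "configure failed");   /* warns, keeps going */
	PP_ASSERT_WITH_CODE(result == 0, "enable failed",
			    return result);           /* warns, then bails */
	return 0;
}

int main(void)
{
	return demo(-1) ? 1 : 0;
}
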
+ 2 - 0
drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h

@@ -41,6 +41,8 @@ inline static uint32_t soc15_get_register_offset(
 		reg = MP1_BASE.instance[inst].segment[segment] + offset;
 	else if (hw_id == DF_HWID)
 		reg = DF_BASE.instance[inst].segment[segment] + offset;
+	else if (hw_id == GC_HWID)
+		reg = GC_BASE.instance[inst].segment[segment] + offset;
 
 	return reg;
 }
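
soc15_get_register_offset() resolves an address as the IP block's per-instance segment base plus the register offset; the hunk above simply adds GC to the lookup chain. A toy version of the scheme (base values invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCE 2
#define MAX_SEGMENT  4

struct ip_base { uint32_t segment[MAX_SEGMENT]; };

static const struct ip_base GC_BASE[MAX_INSTANCE] = {
	{ { 0x00002000, 0x0000A000, 0, 0 } },  /* invented values */
};

static uint32_t get_register_offset(uint32_t inst, uint32_t segment,
				    uint32_t offset)
{
	return GC_BASE[inst].segment[segment] + offset;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)get_register_offset(0, 0, 0x40));
	return 0;
}
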

+ 7 - 6
drivers/gpu/drm/amd/powerplay/inc/smu9.h

@@ -55,9 +55,9 @@
 #define FEATURE_FW_CTF_BIT              23
 #define FEATURE_LED_DISPLAY_BIT         24
 #define FEATURE_FAN_CONTROL_BIT         25
-#define FEATURE_VOLTAGE_CONTROLLER_BIT  26
-#define FEATURE_SPARE_27_BIT            27
-#define FEATURE_SPARE_28_BIT            28
+#define FEATURE_FAST_PPT_BIT            26
+#define FEATURE_GFX_EDC_BIT             27
+#define FEATURE_ACG_BIT                 28
 #define FEATURE_SPARE_29_BIT            29
 #define FEATURE_SPARE_30_BIT            30
 #define FEATURE_SPARE_31_BIT            31
@@ -90,9 +90,10 @@
 #define FFEATURE_FW_CTF_MASK             (1 << FEATURE_FW_CTF_BIT             )
 #define FFEATURE_LED_DISPLAY_MASK        (1 << FEATURE_LED_DISPLAY_BIT        )
 #define FFEATURE_FAN_CONTROL_MASK        (1 << FEATURE_FAN_CONTROL_BIT        )
-#define FFEATURE_VOLTAGE_CONTROLLER_MASK (1 << FEATURE_VOLTAGE_CONTROLLER_BIT )
-#define FFEATURE_SPARE_27_MASK           (1 << FEATURE_SPARE_27_BIT           )
-#define FFEATURE_SPARE_28_MASK           (1 << FEATURE_SPARE_28_BIT           )
+
+#define FEATURE_FAST_PPT_MASK            (1 << FEATURE_FAST_PPT_BIT           )
+#define FEATURE_GFX_EDC_MASK             (1 << FEATURE_GFX_EDC_BIT            )
+#define FEATURE_ACG_MASK                 (1 << FEATURE_ACG_BIT                )
 #define FFEATURE_SPARE_29_MASK           (1 << FEATURE_SPARE_29_BIT           )
 #define FFEATURE_SPARE_30_MASK           (1 << FEATURE_SPARE_30_BIT           )
 #define FFEATURE_SPARE_31_MASK           (1 << FEATURE_SPARE_31_BIT           )

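Drivers test the SMU's enabled-feature word against these masks, each simply 1 shifted by the matching *_BIT. A standalone check using the new bit assignments from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define FEATURE_FAST_PPT_BIT  26
#define FEATURE_GFX_EDC_BIT   27
#define FEATURE_ACG_BIT       28

#define FEATURE_FAST_PPT_MASK (1u << FEATURE_FAST_PPT_BIT)
#define FEATURE_GFX_EDC_MASK  (1u << FEATURE_GFX_EDC_BIT)
#define FEATURE_ACG_MASK      (1u << FEATURE_ACG_BIT)

int main(void)
{
	uint32_t enabled = FEATURE_GFX_EDC_MASK | FEATURE_ACG_MASK;

	printf("fast ppt: %d\n", !!(enabled & FEATURE_FAST_PPT_MASK));
	printf("gfx edc:  %d\n", !!(enabled & FEATURE_GFX_EDC_MASK));
	printf("acg:      %d\n", !!(enabled & FEATURE_ACG_MASK));
	return 0;
}
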
+ 4 - 1
drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h

@@ -312,7 +312,10 @@ typedef struct {
 
   PllSetting_t GfxBoostState;
 
-  uint32_t     Reserved[14];
+  uint8_t      AcgEnable[NUM_GFXCLK_DPM_LEVELS];
+  GbVdroopTable_t AcgBtcGbVdroopTable;
+  QuadraticInt_t  AcgAvfsGb;
+  uint32_t     Reserved[4];
 
   /* Padding - ignore */
   uint32_t     MmHubPadding[7]; /* SMU internal use */

+ 3 - 0
drivers/gpu/drm/amd/powerplay/inc/smumgr.h

@@ -131,6 +131,7 @@ struct pp_smumgr_func {
 	bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
 	int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
 			struct amd_pp_profile *request);
+	bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
 };
 
 struct pp_smumgr {
@@ -202,6 +203,8 @@ extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
 extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
 		struct amd_pp_profile *request);
 
+extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr);
+
 #define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 
 #define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
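
SMUM_FIELD_SHIFT and SMUM_FIELD_MASK paste tokens to reach the generated REG__FIELD__SHIFT and REG__FIELD_MASK names. A standalone expansion example (the DIDT_SQ_CTRL0 values are illustrative stand-ins):

#include <stdio.h>

#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define SMUM_FIELD_MASK(reg, field)  reg##__##field##_MASK

#define DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT 0
#define DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK   0x00000001u

int main(void)
{
	/* Expands to DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK and ...__SHIFT. */
	printf("mask=0x%08x shift=%u\n",
	       (unsigned)SMUM_FIELD_MASK(DIDT_SQ_CTRL0, DIDT_CTRL_EN),
	       (unsigned)SMUM_FIELD_SHIFT(DIDT_SQ_CTRL0, DIDT_CTRL_EN));
	return 0;
}
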

+ 4 - 0
drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h

@@ -124,6 +124,10 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_NumOfDisplays                  0x56
 #define PPSMC_MSG_ReadSerialNumTop32             0x58
 #define PPSMC_MSG_ReadSerialNumBottom32          0x59
+#define PPSMC_MSG_RunAcgBtc                      0x5C
+#define PPSMC_MSG_RunAcgInClosedLoop             0x5D
+#define PPSMC_MSG_RunAcgInOpenLoop               0x5E
+#define PPSMC_MSG_InitializeAcg                  0x5F
 #define PPSMC_MSG_GetCurrPkgPwr                  0x61
 #define PPSMC_Message_Count                      0x62
 
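The four new ACG messages are ordinary SMC message IDs sent through the existing smum transport. A hypothetical bring-up order; the real sequencing belongs to the vega10 hwmgr, and the prototype below is declared locally for the sketch:

#include <stdint.h>

struct pp_smumgr;
int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);

#define PPSMC_MSG_InitializeAcg       0x5F
#define PPSMC_MSG_RunAcgBtc           0x5C
#define PPSMC_MSG_RunAcgInClosedLoop  0x5D

static int acg_bringup(struct pp_smumgr *smumgr)
{
	int ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_InitializeAcg);

	if (!ret)
		ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_RunAcgBtc);
	if (!ret)
		ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_RunAcgInClosedLoop);
	return ret;
}
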

+ 19 - 0
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c

@@ -2129,6 +2129,25 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+
+int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+	int ret;
+	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+	if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+		return 0;
+
+	ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
+
+	if (!ret)
+		/* If this param is not changed, this function could fire unnecessarily */
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+	return ret;
+}
+
 static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

+ 1 - 0
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h

@@ -48,5 +48,6 @@ int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
 bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
 int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
 		struct amd_pp_profile *request);
+int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
 #endif
 

+ 38 - 146
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c

@@ -161,56 +161,47 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
 
 static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
 {
-	int i, result = -1;
+	int i;
+	int result = -EINVAL;
 	uint32_t reg, data;
-	const PWR_Command_Table *virus = PwrVirusTable;
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
 
-	priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS;
-	for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) {
-		switch (virus->command) {
+	const PWR_Command_Table *pvirus = PwrVirusTable;
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+	for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
+		switch (pvirus->command) {
 		case PwrCmdWrite:
-			reg  = virus->reg;
-			data = virus->data;
+			reg  = pvirus->reg;
+			data = pvirus->data;
 			cgs_write_register(smumgr->device, reg, data);
 			break;
+
 		case PwrCmdEnd:
-			priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED;
 			result = 0;
 			break;
+
 		default:
-			pr_err("Table Exit with Invalid Command!");
-			priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
-			result = -1;
+			pr_info("Table Exit with Invalid Command!");
+			smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+			result = -EINVAL;
 			break;
 		}
-		virus++;
+		pvirus++;
 	}
+
 	return result;
 }
 
 static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
 {
 	int result = 0;
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
 
-	priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
-	if (priv->avfs.AvfsBtcParam) {
-		if (!smum_send_msg_to_smc_with_parameter(smumgr,
-				PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
-			if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
-				priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
-				result = 0;
-			} else {
-				pr_err("[AVFS][fiji_start_avfs_btc] Attempt"
-						" to Enable AVFS Failed!");
-				smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
-				result = -1;
-			}
-		} else {
-			pr_err("[AVFS][fiji_start_avfs_btc] "
-					"PerformBTC SMU msg failed");
-			result = -1;
+	if (0 != smu_data->avfs.avfs_btc_param) {
+		if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+				PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+			pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
+			result = -EINVAL;
 		}
 	}
 	/* Soft-Reset to reset the engine before loading uCode */
@@ -224,42 +215,6 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
 	return result;
 }
 
-static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
-{
-	int result = 0;
-	uint32_t table_start;
-	uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq;
-	uint16_t inversion_voltage;
-
-	charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
-	inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
-
-	PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
-			SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
-					PmFuseTable), &table_start, 0x40000),
-			"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
-			"starting address of PmFuse structure",
-			return -1;);
-
-	charz_freq_addr = table_start +
-			offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq);
-	inversion_voltage_addr = table_start +
-			offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
-
-	result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
-			(uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
-	PP_ASSERT_WITH_CODE(0 == result,
-			"[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
-			"be populated.", return -1;);
-
-	result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
-			(uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
-	PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
-			"charz_freq could not be populated.", return -1;);
-
-	return result;
-}
-
 static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 {
 	int32_t vr_config;
@@ -298,93 +253,41 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 	return 0;
 }
 
-/* Work in Progress */
-static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
-{
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
-	if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
-		return 0;
-	} else
-		return -EINVAL;
-}
-
-/* Work in Progress */
-static int fiji_save_vft_table(struct pp_smumgr *smumgr)
-{
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
-	if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
-		return 0;
-	} else
-		return -EINVAL;
-}
-
 static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
 {
-	struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
 
-	switch (priv->avfs.AvfsBtcStatus) {
-	case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED;
-		PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr),
-				"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics "
-				"Level table over to SMU",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
-		break;
-	case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
-				0x666),
-				"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
-				"correctly to VftTableIsValid Msg",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
-		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
-				PPSMC_MSG_EnableAvfs),
-				"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
-				"correctly to EnableAvfs Message Msg",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
+	switch (smu_data->avfs.avfs_btc_status) {
+	case AVFS_BTC_COMPLETED_PREVIOUSLY:
 		break;
+
 	case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
 		if (!smu_started)
 			break;
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
-		PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr),
-				"[AVFS][fiji_avfs_event_mgr] Failure at "
-				"fiji_setup_pm_fuse_for_avfs",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED;
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
 		PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
 				"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
 				" table over to SMU",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
+				return -EINVAL;);
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
 		PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
 				"[AVFS][fiji_avfs_event_mgr] Could not setup "
 				"Pwr Virus for AVFS ",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
+				return -EINVAL;);
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
 		PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
 				"[AVFS][fiji_avfs_event_mgr] Failure at "
 				"fiji_start_avfs_btc. AVFS Disabled",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED;
-		PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr),
-				"[AVFS][fiji_avfs_event_mgr] Could not save VFT Table",
-				return -1;);
-		priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
+				return -EINVAL;);
+
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
 		break;
 	case AVFS_BTC_DISABLED: /* Do nothing */
-		break;
 	case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
+	case AVFS_BTC_ENABLEAVFS:
 		break;
 	default:
-		pr_err("[AVFS] Something is broken. See log!");
+		pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
 		break;
 	}
 	return 0;
@@ -477,19 +380,6 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
 	if (smu7_init(smumgr))
 		return -EINVAL;
 
-	fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
-	if (fiji_is_hw_avfs_present(smumgr))
-		/* AVFS Parameter
-		 * 0 - BTC DC disabled, BTC AC disabled
-		 * 1 - BTC DC enabled,  BTC AC disabled
-		 * 2 - BTC DC disabled, BTC AC enabled
-		 * 3 - BTC DC enabled,  BTC AC enabled
-		 * Default is 0 - BTC DC disabled, BTC AC disabled
-		 */
-		fiji_priv->avfs.AvfsBtcParam = 0;
-	else
-		fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
-
 	for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
 		fiji_priv->activity_target[i] = 30;
 
@@ -514,10 +404,12 @@ const struct pp_smumgr_func fiji_smu_funcs = {
 	.init_smc_table = fiji_init_smc_table,
 	.update_sclk_threshold = fiji_update_sclk_threshold,
 	.thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+	.thermal_avfs_enable = fiji_thermal_avfs_enable,
 	.populate_all_graphic_levels = fiji_populate_all_graphic_levels,
 	.populate_all_memory_levels = fiji_populate_all_memory_levels,
 	.get_mac_definition = fiji_get_mac_definition,
 	.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
 	.is_dpm_running = fiji_is_dpm_running,
 	.populate_requested_graphic_levels = fiji_populate_requested_graphic_levels,
+	.is_hw_avfs_present = fiji_is_hw_avfs_present,
 };
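
is_hw_avfs_present slots into pp_smumgr_func beside the other per-ASIC callbacks, and the smum_is_hw_avfs_present() wrapper declared in smumgr.h above presumably dispatches through it. A plausible shape of that wrapper, with both structs pared down to what the sketch needs:

#include <stdbool.h>
#include <stddef.h>

struct pp_smumgr;

struct pp_smumgr_func {
	bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
};

struct pp_smumgr {
	const struct pp_smumgr_func *smumgr_funcs;
};

bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
{
	if (smumgr && smumgr->smumgr_funcs &&
	    smumgr->smumgr_funcs->is_hw_avfs_present)
		return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
	return false;
}
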

+ 0 - 11
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h

@@ -28,17 +28,8 @@
 #include "smu7_smumgr.h"
 
 
-
-struct fiji_smu_avfs {
-	enum AVFS_BTC_STATUS AvfsBtcStatus;
-	uint32_t           AvfsBtcParam;
-};
-
-
 struct fiji_smumgr {
 	struct smu7_smumgr                   smu7_data;
-
-	struct fiji_smu_avfs avfs;
 	struct SMU73_Discrete_DpmTable       smc_state_table;
 	struct SMU73_Discrete_Ulv            ulv_setting;
 	struct SMU73_Discrete_PmFuses  power_tune_table;
@@ -47,7 +38,5 @@ struct fiji_smumgr {
 
 };
 
-
-
 #endif
 

+ 2 - 2
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c

@@ -1498,7 +1498,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 			table_info->vdd_dep_on_sclk;
 
 
-	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+	if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
 		return result;
 
 	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
@@ -1889,7 +1889,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 {
 	int ret;
 	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)

+ 15 - 19
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c

@@ -60,16 +60,14 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
 static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
 	0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
-
 static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
 {
 	int i;
-	int result = -1;
+	int result = -EINVAL;
 	uint32_t reg, data;
 
 	const PWR_Command_Table *pvirus = pwr_virus_table;
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
 
 	for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
 		switch (pvirus->command) {
@@ -86,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
 		default:
 			pr_info("Table Exit with Invalid Command!");
 			smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
-			result = -1;
+			result = -EINVAL;
 			break;
 		}
 		pvirus++;
@@ -98,7 +96,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
 static int polaris10_perform_btc(struct pp_smumgr *smumgr)
 {
 	int result = 0;
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
 
 	if (0 != smu_data->avfs.avfs_btc_param) {
 		if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
@@ -172,10 +170,11 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
 	return 0;
 }
 
+
 static int
 polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
 {
-	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
 
 	switch (smu_data->avfs.avfs_btc_status) {
 	case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -185,30 +184,31 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
 
 		smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
 		PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
-		"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
-		return -1);
+			"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+			return -EINVAL);
 
 		if (smu_data->avfs.avfs_btc_param > 1) {
  			pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting.");
 			smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
-			PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr),
+			PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
 			"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
-			return -1);
+			return -EINVAL);
 		}
 
 		smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
 		PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
 					"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
-				 return -1);
-
+				 return -EINVAL);
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
 		break;
 
 	case AVFS_BTC_DISABLED:
+	case AVFS_BTC_ENABLEAVFS:
 	case AVFS_BTC_NOTSUPPORTED:
 		break;
 
 	default:
-		pr_info("[AVFS] Something is broken. See log!");
+		pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
 		break;
 	}
 
@@ -376,11 +376,6 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
 	if (smu7_init(smumgr))
 		return -EINVAL;
 
-	if (polaris10_is_hw_avfs_present(smumgr))
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
-	else
-		smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-
 	for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
 		smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
 
@@ -410,4 +405,5 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
 	.get_mac_definition = polaris10_get_mac_definition,
 	.is_dpm_running = polaris10_is_dpm_running,
 	.populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels,
+	.is_hw_avfs_present = polaris10_is_hw_avfs_present,
 };

+ 2 - 10
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h

@@ -32,11 +32,6 @@
 
 #define SMC_RAM_END 0x40000
 
-struct polaris10_avfs {
-	enum AVFS_BTC_STATUS avfs_btc_status;
-	uint32_t           avfs_btc_param;
-};
-
 struct polaris10_pt_defaults {
 	uint8_t   SviLoadLineEn;
 	uint8_t   SviLoadLineVddC;
@@ -51,8 +46,6 @@ struct polaris10_pt_defaults {
 	uint16_t  BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
 };
 
-
-
 struct polaris10_range_table {
 	uint32_t trans_lower_frequency; /* in 10khz */
 	uint32_t trans_upper_frequency;
@@ -61,14 +54,13 @@ struct polaris10_range_table {
 struct polaris10_smumgr {
 	struct smu7_smumgr smu7_data;
 	uint8_t protected_mode;
-	struct polaris10_avfs  avfs;
 	SMU74_Discrete_DpmTable              smc_state_table;
 	struct SMU74_Discrete_Ulv            ulv_setting;
 	struct SMU74_Discrete_PmFuses  power_tune_table;
 	struct polaris10_range_table                range_table[NUM_SCLK_RANGE];
 	const struct polaris10_pt_defaults       *power_tune_defaults;
-	uint32_t                   activity_target[SMU74_MAX_LEVELS_GRAPHICS];
-	uint32_t                   bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+	uint32_t               activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+	uint32_t               bif_sclk_table[SMU74_MAX_LEVELS_LINK];
 };
 
 

+ 5 - 1
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c

@@ -540,7 +540,6 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
 	return result;
 }
 
-
 int smu7_init(struct pp_smumgr *smumgr)
 {
 	struct smu7_smumgr *smu_data;
@@ -596,6 +595,11 @@ int smu7_init(struct pp_smumgr *smumgr)
 		(cgs_handle_t)smu_data->smu_buffer.handle);
 		return -EINVAL);
 
+	if (smum_is_hw_avfs_present(smumgr))
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+	else
+		smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
 	return 0;
 }
 
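With the probe moved into the common smu7_init(), boot leaves avfs_btc_status at AVFS_BTC_BOOT when hardware AVFS is present and AVFS_BTC_NOTSUPPORTED otherwise; the event managers later advance it to AVFS_BTC_ENABLEAVFS, the only state in which fiji_thermal_avfs_enable() above sends PPSMC_MSG_EnableAvfs before latching AVFS_BTC_COMPLETED_PREVIOUSLY. That gate, condensed into a standalone sketch:

/* Subset of the real AVFS_BTC_STATUS enum. */
enum avfs_btc_status {
	AVFS_BTC_NOTSUPPORTED,
	AVFS_BTC_BOOT,
	AVFS_BTC_ENABLEAVFS,
	AVFS_BTC_COMPLETED_PREVIOUSLY,
};

static int send_enable_avfs(void) { return 0; }  /* stands in for the SMC msg */

static int thermal_avfs_enable(enum avfs_btc_status *status)
{
	int ret;

	if (*status != AVFS_BTC_ENABLEAVFS)
		return 0;                /* not ready, or not supported */

	ret = send_enable_avfs();
	if (!ret)
		/* latch so the message is not re-sent on later calls */
		*status = AVFS_BTC_COMPLETED_PREVIOUSLY;
	return ret;
}

int main(void)
{
	enum avfs_btc_status st = AVFS_BTC_ENABLEAVFS;

	return thermal_avfs_enable(&st);
}
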

+ 7 - 1
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h

@@ -37,6 +37,11 @@ struct smu7_buffer_entry {
 	unsigned long  handle;
 };
 
+struct smu7_avfs {
+	enum AVFS_BTC_STATUS avfs_btc_status;
+	uint32_t           avfs_btc_param;
+};
+
 struct smu7_smumgr {
 	uint8_t *header;
 	uint8_t *mec_image;
@@ -50,7 +55,8 @@ struct smu7_smumgr {
 	uint32_t                             arb_table_start;
 	uint32_t                             ulv_setting_starts;
 	uint8_t                              security_hard_key;
-	uint32_t acpi_optimization;
+	uint32_t                             acpi_optimization;
+	struct smu7_avfs                     avfs;
 };
 
 

Some files were not shown because too many files changed in this diff