
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next

Fixes for 4.19:
- Add VCN PSP FW loading for RV (this is required on upcoming parts)
- Fix scheduler setup ordering for VCE and UVD
- A few misc display fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180816181840.2786-1-alexander.deucher@amd.com
commit 637319c678

+ 5 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c

@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		msleep(1);
 	}
 
+	if (ucode) {
+		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+	}
+
 	return ret;
 }
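
The two fields written here come from the PSP's response: the MC address inside the trusted memory region (TMR) where the PSP placed the firmware image. Saving the lo/hi halves per ucode lets an IP block program its cache BARs later; vcn_v1_0_mc_resume() further down uses them directly. A minimal sketch of folding the halves back into a 64-bit address — the helper name is hypothetical, not part of the patch:

```c
#include <linux/types.h>

/* Hypothetical helper (not in the patch): rebuild the 64-bit TMR MC
 * address from the halves saved in struct amdgpu_firmware_info. */
static inline u64 ucode_tmr_mc_addr(const struct amdgpu_firmware_info *ucode)
{
	return ((u64)ucode->tmr_mc_addr_hi << 32) | ucode->tmr_mc_addr_lo;
}
```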
 

+ 4 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h

@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_SMC,
 	AMDGPU_UCODE_ID_UVD,
 	AMDGPU_UCODE_ID_VCE,
+	AMDGPU_UCODE_ID_VCN,
 	AMDGPU_UCODE_ID_MAXIMUM,
 };
 
@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
 	void *kaddr;
 	/* ucode_size_bytes */
 	uint32_t ucode_size;
+	/* starting tmr mc address */
+	uint32_t tmr_mc_addr_lo;
+	uint32_t tmr_mc_addr_hi;
 };
 
 void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);

+ 26 - 12
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -122,8 +122,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -266,13 +264,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
-	ring = &adev->uvd.inst[0].ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
-	if (r) {
-		DRM_ERROR("Failed setting up UVD kernel entity.\n");
-		return r;
-	}
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
@@ -311,7 +302,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
-		kfree(adev->uvd.inst[j].saved_bo);
+		kvfree(adev->uvd.inst[j].saved_bo);
 
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 				      &adev->uvd.inst[j].gpu_addr,
@@ -327,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_uvd_entity_init - init the UVD scheduler entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	struct drm_sched_rq *rq;
+	int r;
+
+	ring = &adev->uvd.inst[0].ring;
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	if (r) {
+		DRM_ERROR("Failed setting up UVD kernel entity.\n");
+		return r;
+	}
+
+	return 0;
+}
+
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
@@ -354,7 +368,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
 		ptr = adev->uvd.inst[j].cpu_addr;
 
-		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
 		if (!adev->uvd.inst[j].saved_bo)
 			return -ENOMEM;
 
@@ -380,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 		if (adev->uvd.inst[i].saved_bo != NULL) {
 			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
-			kfree(adev->uvd.inst[i].saved_bo);
+			kvfree(adev->uvd.inst[i].saved_bo);
 			adev->uvd.inst[i].saved_bo = NULL;
 		} else {
 			const struct common_firmware_header *hdr;
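
Moving the entity setup out of amdgpu_uvd_sw_init() (and, below, out of amdgpu_vce_sw_init()) is an ordering fix: drm_sched_entity_init() takes a run queue that lives in ring->sched, and that scheduler only exists once the IP-version sw_init has called amdgpu_ring_init(). A sketch of the corrected call order, mirroring the uvd_v5_0/uvd_v7_0 hunks further down (error paths abbreviated):

```c
/* Sketch: initialize the ring first, then the scheduler entity that
 * points into ring->sched. */
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
	return r;

r = amdgpu_uvd_entity_init(adev);	/* ring->sched.sched_rq is valid now */
if (r)
	return r;
```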

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

@@ -69,6 +69,7 @@ struct amdgpu_uvd {
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

+ 23 - 10
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
  */
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned ucode_version, version_major, version_minor, binary_id;
@@ -188,14 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 		return r;
 	}
 
-	ring = &adev->vce.ring[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCE run queue.\n");
-		return r;
-	}
-
 	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 		atomic_set(&adev->vce.handles[i], 0);
 		adev->vce.filp[i] = NULL;
@@ -235,6 +225,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_vce_entity_init - init the VCE scheduler entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	struct drm_sched_rq *rq;
+	int r;
+
+	ring = &adev->vce.ring[0];
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up VCE run queue.\n");
+		return r;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_vce_suspend - unpin VCE fw memory
  *

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h

@@ -55,6 +55,7 @@ struct amdgpu_vce {
 
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,

+ 13 - 10
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 			version_major, version_minor, family_id);
 	}
 
-	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-		  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
 		  +  AMDGPU_VCN_SESSION_SIZE * 40;
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
 				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
@@ -129,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
 	int i;
 
-	kfree(adev->vcn.saved_bo);
+	kvfree(adev->vcn.saved_bo);
 
 	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
 			      &adev->vcn.gpu_addr,
@@ -160,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
 	ptr = adev->vcn.cpu_addr;
 
-	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
 	if (!adev->vcn.saved_bo)
 		return -ENOMEM;
 
@@ -182,18 +183,20 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 
 	if (adev->vcn.saved_bo != NULL) {
 		memcpy_toio(ptr, adev->vcn.saved_bo, size);
-		kfree(adev->vcn.saved_bo);
+		kvfree(adev->vcn.saved_bo);
 		adev->vcn.saved_bo = NULL;
 	} else {
 		const struct common_firmware_header *hdr;
 		unsigned offset;
 
 		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
-			    le32_to_cpu(hdr->ucode_size_bytes));
-		size -= le32_to_cpu(hdr->ucode_size_bytes);
-		ptr += le32_to_cpu(hdr->ucode_size_bytes);
+		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+				    le32_to_cpu(hdr->ucode_size_bytes));
+			size -= le32_to_cpu(hdr->ucode_size_bytes);
+			ptr += le32_to_cpu(hdr->ucode_size_bytes);
+		}
 		memset_io(ptr, 0, size);
 	}
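
The kmalloc()/kfree() to kvmalloc()/kvfree() switches across this series share one rationale: saved_bo is a backup of the entire VCPU BO, easily several MiB, and a physically contiguous allocation that large can fail under fragmentation. kvmalloc() tries kmalloc() first and falls back to vmalloc(); the buffer must then be released with kvfree(), which handles both backings. A minimal sketch of the pairing rule (helper names are illustrative only):

```c
#include <linux/mm.h>	/* kvmalloc(), kvfree() */

/* Sketch: allocate a large firmware backup that need not be
 * physically contiguous. */
static void *vcpu_bo_backup_alloc(size_t size)
{
	return kvmalloc(size, GFP_KERNEL);	/* vmalloc fallback for big sizes */
}

static void vcpu_bo_backup_free(void *backup)
{
	kvfree(backup);	/* correct for both kmalloc- and vmalloc-backed memory */
}
```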
 

+ 3 - 0
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c

@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
 	case AMDGPU_UCODE_ID_VCE:
 		*type = GFX_FW_TYPE_VCE;
 		break;
+	case AMDGPU_UCODE_ID_VCN:
+		*type = GFX_FW_TYPE_VCN;
+		break;
 	case AMDGPU_UCODE_ID_MAXIMUM:
 	default:
 		return -EINVAL;

+ 4 - 0
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

@@ -123,6 +123,10 @@ static int uvd_v4_2_sw_init(void *handle)
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
 }

+ 4 - 0
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -120,6 +120,10 @@ static int uvd_v5_0_sw_init(void *handle)
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+	if (r)
+		return r;
+
+	r = amdgpu_uvd_entity_init(adev);
 
 	return r;
 }

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -440,6 +440,8 @@ static int uvd_v6_0_sw_init(void *handle)
 		}
 	}
 
+	r = amdgpu_uvd_entity_init(adev);
+
 	return r;
 }
 

+ 5 - 0
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c

@@ -410,6 +410,7 @@ static int uvd_v7_0_early_init(void *handle)
 static int uvd_v7_0_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
+
 	int i, j, r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -478,6 +479,10 @@ static int uvd_v7_0_sw_init(void *handle)
 		}
 	}
 
+	r = amdgpu_uvd_entity_init(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_virt_alloc_mm_table(adev);
 	if (r)
 		return r;

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c

@@ -439,6 +439,8 @@ static int vce_v2_0_sw_init(void *handle)
 			return r;
 	}
 
+	r = amdgpu_vce_entity_init(adev);
+
 	return r;
 }
 

+ 2 - 0
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c

@@ -448,6 +448,8 @@ static int vce_v3_0_sw_init(void *handle)
 			return r;
 	}
 
+	r = amdgpu_vce_entity_init(adev);
+
 	return r;
 }
 

+ 8 - 2
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c

@@ -419,6 +419,7 @@ static int vce_v4_0_sw_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ring *ring;
+
 	unsigned size;
 	int r, i;
 
@@ -438,7 +439,7 @@ static int vce_v4_0_sw_init(void *handle)
 		const struct common_firmware_header *hdr;
 		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
 
-		adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+		adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
 		if (!adev->vce.saved_bo)
 			return -ENOMEM;
 
@@ -474,6 +475,11 @@ static int vce_v4_0_sw_init(void *handle)
 			return r;
 	}
 
+
+	r = amdgpu_vce_entity_init(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_virt_alloc_mm_table(adev);
 	if (r)
 		return r;
@@ -490,7 +496,7 @@ static int vce_v4_0_sw_fini(void *handle)
 	amdgpu_virt_free_mm_table(adev);
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		kfree(adev->vce.saved_bo);
+		kvfree(adev->vce.saved_bo);
 		adev->vce.saved_bo = NULL;
 	}
 

+ 31 - 9
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

@@ -100,6 +100,16 @@ static int vcn_v1_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		const struct common_firmware_header *hdr;
+		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+		DRM_INFO("PSP loading VCN firmware\n");
+	}
+
 	r = amdgpu_vcn_resume(adev);
 	if (r)
 		return r;
@@ -265,26 +275,38 @@ static int vcn_v1_0_resume(void *handle)
 static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
 {
 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
-
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+	uint32_t offset;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
+		offset = 0;
+	} else {
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 			lower_32_bits(adev->vcn.gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 			upper_32_bits(adev->vcn.gpu_addr));
-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
-				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+		offset = size;
+		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+	}
+
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
 
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-			lower_32_bits(adev->vcn.gpu_addr + size));
+		     lower_32_bits(adev->vcn.gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-			upper_32_bits(adev->vcn.gpu_addr + size));
+		     upper_32_bits(adev->vcn.gpu_addr + offset));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
 
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-			lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-			upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
 			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
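
The new offset variable encodes the layout change: with driver loading, the firmware image occupies the start of the VCPU BO, so the heap and stack windows begin after it (offset = size, the page-aligned firmware size); with PSP loading, the firmware lives in the PSP TMR instead, the first cache BAR points at the TMR address, and the BO begins directly with the heap (offset = 0). Condensed from the hunk above:

```c
/* BAR layout chosen in vcn_v1_0_mc_resume():
 *   driver load: VCPU BO = [ firmware | heap | stack+sessions ], offset = size
 *   PSP load:    VCPU BO = [ heap | stack+sessions ],            offset = 0
 */
offset = (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? 0 : size;
```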

+ 8 - 2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c

@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
  */
 void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
 {
-	struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
-	struct dc_stream_state *stream_state = crtc_state->stream;
+	struct dm_crtc_state *crtc_state;
+	struct dc_stream_state *stream_state;
 	uint32_t crcs[3];
 
+	if (crtc == NULL)
+		return;
+
+	crtc_state = to_dm_crtc_state(crtc->state);
+	stream_state = crtc_state->stream;
+
 	/* Early return if CRC capture is not enabled. */
 	if (!crtc_state->crc_enabled)
 		return;

+ 2 - 0
drivers/gpu/drm/amd/display/dc/core/dc_link.c

@@ -1812,6 +1812,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
 	bool is_vga_mode = (stream->timing.h_addressable == 640)
 			&& (stream->timing.v_addressable == 480);
 
+	if (stream->phy_pix_clk == 0)
+		stream->phy_pix_clk = stream->timing.pix_clk_khz;
 	if (stream->phy_pix_clk > 340000)
 		is_over_340mhz = true;
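
This guards the over-340 MHz check against an unset PHY pixel clock: if phy_pix_clk was never filled in, it now defaults to the timing's pixel clock (in kHz) before the comparison, so is_over_340mhz is decided from real data instead of always being false. The sequence, condensed:

```c
/* Condensed from the hunk above: default, then test (values in kHz). */
if (stream->phy_pix_clk == 0)
	stream->phy_pix_clk = stream->timing.pix_clk_khz;
is_over_340mhz = stream->phy_pix_clk > 340000;	/* > 340 MHz TMDS */
```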
 

+ 34 - 16
drivers/gpu/drm/amd/display/dc/core/dc_resource.c

@@ -268,24 +268,30 @@ bool resource_construct(
 
 	return true;
 }
+static int find_matching_clock_source(
+		const struct resource_pool *pool,
+		struct clock_source *clock_source)
+{
+
+	int i;
 
+	for (i = 0; i < pool->clk_src_count; i++) {
+		if (pool->clock_sources[i] == clock_source)
+			return i;
+	}
+	return -1;
+}
 
 void resource_unreference_clock_source(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool,
 		struct clock_source *clock_source)
 {
-	int i;
-
-	for (i = 0; i < pool->clk_src_count; i++) {
-		if (pool->clock_sources[i] != clock_source)
-			continue;
+	int i = find_matching_clock_source(pool, clock_source);
 
+	if (i > -1)
 		res_ctx->clock_source_ref_count[i]--;
 
-		break;
-	}
-
 	if (pool->dp_clock_source == clock_source)
 		res_ctx->dp_clock_source_ref_count--;
 }
@@ -295,19 +301,31 @@ void resource_reference_clock_source(
 		const struct resource_pool *pool,
 		struct clock_source *clock_source)
 {
-	int i;
-	for (i = 0; i < pool->clk_src_count; i++) {
-		if (pool->clock_sources[i] != clock_source)
-			continue;
+	int i = find_matching_clock_source(pool, clock_source);
 
+	if (i > -1)
 		res_ctx->clock_source_ref_count[i]++;
-		break;
-	}
 
 	if (pool->dp_clock_source == clock_source)
 		res_ctx->dp_clock_source_ref_count++;
 }
 
+int resource_get_clock_source_reference(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct clock_source *clock_source)
+{
+	int i = find_matching_clock_source(pool, clock_source);
+
+	if (i > -1)
+		return res_ctx->clock_source_ref_count[i];
+
+	if (pool->dp_clock_source == clock_source)
+		return res_ctx->dp_clock_source_ref_count;
+
+	return -1;
+}
+
 bool resource_are_streams_timing_synchronizable(
 	struct dc_stream_state *stream1,
 	struct dc_stream_state *stream2)
@@ -372,11 +390,11 @@ static bool is_sharable_clk_src(
 		return false;
 
 	if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
-			&& dc_is_dvi_signal(pipe->stream->signal))
+			&& dc_is_dual_link_signal(pipe->stream->signal))
 		return false;
 
 	if (dc_is_hdmi_signal(pipe->stream->signal)
-			&& dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+			&& dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
 		return false;
 
 	if (!resource_are_streams_timing_synchronizable(

+ 2 - 2
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c

@@ -930,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
 	enum bp_result result;
 
 	/* Enable the PHY */
-
+	cntl.connector_obj_id = enc110->base.connector;
 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
 	cntl.engine_id = enc->preferred_engine;
 	cntl.transmitter = enc110->base.transmitter;
@@ -972,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
 	 * We need to set number of lanes manually.
 	 */
 	configure_encoder(enc110, link_settings);
-
+	cntl.connector_obj_id = enc110->base.connector;
 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
 	cntl.engine_id = enc->preferred_engine;
 	cntl.transmitter = enc110->base.transmitter;

+ 3 - 1
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c

@@ -1908,7 +1908,9 @@ static void dce110_reset_hw_ctx_wrap(
 			pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
 					pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
 
-			if (old_clk)
+			if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+										dc->res_pool,
+										old_clk))
 				old_clk->funcs->cs_power_down(old_clk);
 
 			dc->hwss.disable_plane(dc, pipe_ctx_old);
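
Paired with resource_get_clock_source_reference() added in dc_resource.c above, the power-down becomes a last-reference check: a clock source still shared by another pipe keeps running, and cs_power_down() fires only once its reference count reaches zero. The guard pattern:

```c
/* Power down the old clock source only when nothing references it. */
if (old_clk &&
    resource_get_clock_source_reference(&context->res_ctx, dc->res_pool,
					old_clk) == 0)
	old_clk->funcs->cs_power_down(old_clk);
```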

+ 1 - 1
drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c

@@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
 
 	CRTC_REG_SET(
 		CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
-		CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+		CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
 
 	if (enable_blanking)
 		CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);

+ 5 - 0
drivers/gpu/drm/amd/display/dc/inc/resource.h

@@ -103,6 +103,11 @@ void resource_reference_clock_source(
 		const struct resource_pool *pool,
 		struct clock_source *clock_source);
 
+int resource_get_clock_source_reference(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		struct clock_source *clock_source);
+
 bool resource_are_streams_timing_synchronizable(
 		struct dc_stream_state *stream1,
 		struct dc_stream_state *stream2);