
Merge tag 'drm-next-2018-11-02' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Pretty much a normal fixes pull pre-rc1, mostly amdgpu fixes, one i915
  link training regression fix, and a couple of minor panel/bridge fixes
  and a panel quirk"

* tag 'drm-next-2018-11-02' of git://anongit.freedesktop.org/drm/drm: (37 commits)
  drm/amdgpu: revert "enable gfxoff in non-sriov and stutter mode by default"
  drm/amd/pp: Print warning if od_sclk/mclk out of range
  drm/amd/pp: Fix pp_sclk/mclk_od not work on Vega10
  drm/amd/pp: Fix pp_sclk/mclk_od not work on smu7
  drm/amd/powerplay: no MGPU fan boost enablement on DPM disabled
  drm/amdgpu: Fix skipping hangged job reset during gpu recover.
  drm/amd/powerplay: revise Vega20 pptable version check
  drm/amd/display: set backlight level limit to 1
  drm/panel: simple: Innolux TV123WAM is actually P120ZDG-BF1
  dt-bindings: drm/panel: simple: Innolux TV123WAM is actually P120ZDG-BF1
  drm/bridge: ti-sn65dsi86: Remove the mystery delay
  drm/panel: simple: Add "no-hpd" delay for Innolux TV123WAM
  drm/panel: simple: Support panels with HPD where HPD isn't connected
  dt-bindings: drm/panel: simple: Add no-hpd property
  drm/edid: Add 6 bpc quirk for BOE panel.
  drm/amdgpu: fix reporting of failed msg sent to SMU (v2)
  drm/amdgpu: Fix compute ring 1.0.0 failure after reset
  drm/amdgpu: fix VM leaf walking
  drm/amdgpu: fix amdgpu_vm_fini
  drm/amd/powerplay: commonize the API for retrieving current clocks
  ...
Linus Torvalds 6 years ago
Parent
Current commit
bc6080ae38
32 changed files with 364 additions and 175 deletions
  1. Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt (+5 -3)
  2. Documentation/devicetree/bindings/display/panel/simple-panel.txt (+3 -0)
  3. drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c (+4 -2)
  4. drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (+2 -4)
  5. drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c (+2 -2)
  6. drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c (+1 -1)
  7. drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c (+12 -3)
  8. drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (+5 -2)
  9. drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c (+4 -2)
  10. drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c (+1 -1)
  11. drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c (+4 -2)
  12. drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (+7 -0)
  13. drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c (+11 -5)
  14. drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c (+1 -1)
  15. drivers/gpu/drm/amd/powerplay/amd_powerplay.c (+24 -9)
  16. drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c (+6 -4)
  17. drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c (+1 -1)
  18. drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c (+39 -4)
  19. drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c (+8 -0)
  20. drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c (+45 -40)
  21. drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c (+25 -31)
  22. drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h (+1 -1)
  23. drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c (+4 -0)
  24. drivers/gpu/drm/bridge/ti-sn65dsi86.c (+16 -13)
  25. drivers/gpu/drm/drm_atomic_helper.c (+20 -1)
  26. drivers/gpu/drm/drm_connector.c (+6 -5)
  27. drivers/gpu/drm/drm_edid.c (+3 -0)
  28. drivers/gpu/drm/i915/intel_dp.c (+7 -13)
  29. drivers/gpu/drm/i915/intel_dp_mst.c (+4 -4)
  30. drivers/gpu/drm/nouveau/dispnv50/disp.c (+4 -10)
  31. drivers/gpu/drm/panel/panel-simple.c (+20 -9)
  32. include/drm/drm_connector.h (+69 -2)

+ 5 - 3
Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt → Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt

@@ -1,20 +1,22 @@
-Innolux TV123WAM 12.3 inch eDP 2K display panel
+Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
 
 This binding is compatible with the simple-panel binding, which is specified
 in simple-panel.txt in this directory.
 
 Required properties:
-- compatible: should be "innolux,tv123wam"
+- compatible: should be "innolux,p120zdg-bf1"
 - power-supply: regulator to provide the supply voltage
 
 Optional properties:
 - enable-gpios: GPIO pin to enable or disable the panel
 - backlight: phandle of the backlight device attached to the panel
+- no-hpd: If HPD isn't hooked up; add this property.
 
 Example:
 	panel_edp: panel-edp {
-		compatible = "innolux,tv123wam";
+		compatible = "innolux,p120zdg-bf1";
 		enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
 		power-supply = <&pm8916_l2>;
 		backlight = <&backlight>;
+		no-hpd;
 	};

+ 3 - 0
Documentation/devicetree/bindings/display/panel/simple-panel.txt

@@ -11,6 +11,9 @@ Optional properties:
 - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
 - enable-gpios: GPIO pin to enable or disable the panel
 - backlight: phandle of the backlight device attached to the panel
+- no-hpd: This panel is supposed to communicate that it's ready via HPD
+  (hot plug detect) signal, but the signal isn't hooked up so we should
+  hardcode the max delay from the panel spec when powering up the panel.
 
 Example:
 

+ 4 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c

@@ -135,7 +135,8 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
 	 * 2. power off the acp tiles
 	 * 3. check and enter ulv state
 	 */
-		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+		if (adev->powerplay.pp_funcs &&
+			adev->powerplay.pp_funcs->set_powergating_by_smu)
 			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
 	}
 	return 0;
@@ -517,7 +518,8 @@ static int acp_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = state == AMD_PG_STATE_GATE ? true : false;
 
-	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+	if (adev->powerplay.pp_funcs &&
+		adev->powerplay.pp_funcs->set_powergating_by_smu)
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
 
 	return 0;

+ 2 - 4
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1493,8 +1493,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	}
 
 	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
-	if (amdgpu_sriov_vf(adev))
-		adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1600,7 +1598,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 		}
 	}
 
-	if (adev->powerplay.pp_funcs->load_firmware) {
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
 		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
 		if (r) {
 			pr_err("firmware loading failed\n");
@@ -3341,7 +3339,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 		kthread_park(ring->sched.thread);
 
-		if (job && job->base.sched == &ring->sched)
+		if (job && job->base.sched != &ring->sched)
 			continue;
 
 		drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -114,8 +114,8 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

@@ -392,7 +392,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 	if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
 		return;
 
-	if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
+	if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
 		return;
 
 

+ 12 - 3
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -704,7 +704,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		return ret;
 
 	if (adev->powerplay.pp_funcs->force_clock_level)
-		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+
+	if (ret)
+		return -EINVAL;
 
 	return count;
 }
@@ -737,7 +740,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		return ret;
 
 	if (adev->powerplay.pp_funcs->force_clock_level)
-		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+
+	if (ret)
+		return -EINVAL;
 
 	return count;
 }
@@ -770,7 +776,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		return ret;
 
 	if (adev->powerplay.pp_funcs->force_clock_level)
-		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+
+	if (ret)
+		return -EINVAL;
 
 	return count;
 }

+ 5 - 2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -542,7 +542,8 @@ static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
 				   struct amdgpu_vm_pt_cursor *cursor)
 {
 	amdgpu_vm_pt_next(adev, cursor);
-	while (amdgpu_vm_pt_descendant(adev, cursor));
+	if (cursor->pfn != ~0ll)
+		while (amdgpu_vm_pt_descendant(adev, cursor));
 }
 
 /**
@@ -3234,8 +3235,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
 					     &vm->va.rb_root, rb) {
+		/* Don't remove the mapping here, we don't want to trigger a
+		 * rebalance and the tree is about to be destroyed anyway.
+		 */
 		list_del(&mapping->list);
-		amdgpu_vm_it_remove(mapping, &vm->va);
 		kfree(mapping);
 	}
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {

+ 4 - 2
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -4815,8 +4815,10 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
 	if (r)
 		goto done;
 
-	/* Test KCQs */
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+	/* Test KCQs - reversing the order of rings seems to fix ring test failure
+	 * after GPU reset
+	 */
+	for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
 		ring = &adev->gfx.compute_ring[i];
 		ring->ready = true;
 		r = amdgpu_ring_test_ring(ring);

+ 1 - 1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c

@@ -280,7 +280,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
 		return;
 
 	if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
-		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
 			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
 
 	}

+ 4 - 2
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

@@ -1366,7 +1366,8 @@ static int sdma_v4_0_hw_init(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
+			adev->powerplay.pp_funcs->set_powergating_by_smu)
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
 
 	sdma_v4_0_init_golden_registers(adev);
@@ -1386,7 +1387,8 @@ static int sdma_v4_0_hw_fini(void *handle)
 	sdma_v4_0_ctx_switch_enable(adev, false);
 	sdma_v4_0_enable(adev, false);
 
-	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+	if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
+			&& adev->powerplay.pp_funcs->set_powergating_by_smu)
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
 
 	return 0;

+ 7 - 0
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -1524,6 +1524,13 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
 
+	/*
+	 * PWM interperts 0 as 100% rather than 0% because of HW
+	 * limitation for level 0.So limiting minimum brightness level
+	 * to 1.
+	 */
+	if (bd->props.brightness < 1)
+		return 1;
 	if (dc_link_set_backlight_level(dm->backlight_link,
 			bd->props.brightness, 0, 0))
 		return 0;

+ 11 - 5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c

@@ -101,7 +101,7 @@ bool dm_pp_apply_display_requirements(
 			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
 		}
 
-		if (adev->powerplay.pp_funcs->display_configuration_change)
+		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
 			adev->powerplay.pp_funcs->display_configuration_change(
 				adev->powerplay.pp_handle,
 				&adev->pm.pm_display_cfg);
@@ -304,7 +304,7 @@ bool dm_pp_get_clock_levels_by_type(
 	struct amd_pp_simple_clock_info validation_clks = { 0 };
 	uint32_t i;
 
-	if (adev->powerplay.pp_funcs->get_clock_by_type) {
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
 		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
 			dc_to_pp_clock_type(clk_type), &pp_clks)) {
 		/* Error in pplib. Provide default values. */
@@ -315,7 +315,7 @@ bool dm_pp_get_clock_levels_by_type(
 
 	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
 
-	if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
 		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
 						pp_handle, &validation_clks)) {
 			/* Error in pplib. Provide default values. */
@@ -398,6 +398,9 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
 	struct pp_clock_levels_with_voltage pp_clk_info = {0};
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+	if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
+		return false;
+
 	if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
 						     dc_to_pp_clock_type(clk_type),
 						     &pp_clk_info))
@@ -438,7 +441,7 @@ bool dm_pp_apply_clock_for_voltage_request(
 	if (!pp_clock_request.clock_type)
 		return false;
 
-	if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
 		ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
 			adev->powerplay.pp_handle,
 			&pp_clock_request);
@@ -455,7 +458,7 @@ bool dm_pp_get_static_clocks(
 	struct amd_pp_clock_info pp_clk_info = {0};
 	int ret = 0;
 
-	if (adev->powerplay.pp_funcs->get_current_clocks)
+	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
 		ret = adev->powerplay.pp_funcs->get_current_clocks(
 			adev->powerplay.pp_handle,
 			&pp_clk_info);
@@ -505,6 +508,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
 	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
 	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
 
+	if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
+		return;
+
 	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
 		if (ranges->reader_wm_sets[i].wm_inst > 3)
 			wm_dce_clocks[i].wm_set_id = WM_SET_A;

+ 1 - 1
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c

@@ -568,7 +568,7 @@ static struct input_pixel_processor *dce110_ipp_create(
 
 static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
-		.max_hdmi_pixel_clock = 594000,
+		.max_hdmi_pixel_clock = 300000,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true
 };

+ 24 - 9
drivers/gpu/drm/amd/powerplay/amd_powerplay.c

@@ -723,11 +723,14 @@ static int pp_dpm_force_clock_level(void *handle,
 		pr_info("%s was not implemented.\n", __func__);
 		return 0;
 	}
+
+	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+		pr_info("force clock level is for dpm manual mode only.\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&hwmgr->smu_lock);
-	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
-		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
-	else
-		ret = -EINVAL;
+	ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 	mutex_unlock(&hwmgr->smu_lock);
 	return ret;
 }
@@ -963,6 +966,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 static int pp_set_power_limit(void *handle, uint32_t limit)
 {
 	struct pp_hwmgr *hwmgr = handle;
+	uint32_t max_power_limit;
 
 	if (!hwmgr || !hwmgr->pm_en)
 		return -EINVAL;
@@ -975,7 +979,13 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
 	if (limit == 0)
 		limit = hwmgr->default_power_limit;
 
-	if (limit > hwmgr->default_power_limit)
+	max_power_limit = hwmgr->default_power_limit;
+	if (hwmgr->od_enabled) {
+		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
+		max_power_limit /= 100;
+	}
+
+	if (limit > max_power_limit)
 		return -EINVAL;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -994,8 +1004,13 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 
 	mutex_lock(&hwmgr->smu_lock);
 
-	if (default_limit)
+	if (default_limit) {
 		*limit = hwmgr->default_power_limit;
+		if (hwmgr->od_enabled) {
+			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
+			*limit /= 100;
+		}
+	}
 	else
 		*limit = hwmgr->power_limit;
 
@@ -1303,12 +1318,12 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 {
 	struct pp_hwmgr *hwmgr = handle;
 
-	if (!hwmgr || !hwmgr->pm_en)
+	if (!hwmgr)
 		return -EINVAL;
 
-	if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) {
+	if (!hwmgr->pm_en ||
+	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
 		return 0;
-	}
 
 	mutex_lock(&hwmgr->smu_lock);
 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
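
The overdrive scaling applied in pp_set_power_limit()/pp_get_power_limit() above is a plain percentage bump on the default limit: max = default * (100 + TDPODLimit) / 100. A minimal standalone sketch with made-up numbers (the 250 W default and the 20% TDPODLimit are assumptions for illustration, not values from this commit):

#include <stdint.h>
#include <stdio.h>

/* Same integer arithmetic as the hunk above: scale the default power
 * limit by the overdrive TDP percentage when OD is enabled. */
static uint32_t od_max_power_limit(uint32_t default_limit,
				   uint32_t tdp_od_limit, int od_enabled)
{
	uint32_t max = default_limit;

	if (od_enabled) {
		max *= 100 + tdp_od_limit;
		max /= 100;
	}
	return max;
}

int main(void)
{
	/* Hypothetical: 250 W default with 20% OD headroom -> 300 W cap */
	printf("%u W\n", od_max_power_limit(250, 20, 1));
	return 0;
}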

+ 6 - 4
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c

@@ -3588,9 +3588,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 			break;
 	}
 
-	if (i >= sclk_table->count)
+	if (i >= sclk_table->count) {
 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-	else {
+		sclk_table->dpm_levels[i-1].value = sclk;
+	} else {
 	/* TODO: Check SCLK in DAL's minimum clocks
 	 * in case DeepSleep divider update is required.
 	 */
@@ -3605,9 +3606,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
 			break;
 	}
 
-	if (i >= mclk_table->count)
+	if (i >= mclk_table->count) {
 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
+		mclk_table->dpm_levels[i-1].value = mclk;
+	}
 
 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

+ 1 - 1
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c

@@ -718,7 +718,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 		table->WatermarkRow[1][i].MaxClock =
 			cpu_to_le16((uint16_t)
 			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
-			100);
+			1000);
 		table->WatermarkRow[1][i].MinUclk =
 			cpu_to_le16((uint16_t)
 			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /

+ 39 - 4
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c

@@ -1333,7 +1333,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
 		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
 					dpm_table->dpm_levels[dpm_table->count-1].value;
-
 	vega10_init_dpm_state(&(dpm_table->dpm_state));
 
 	data->dpm_table.eclk_table.count = 0;
@@ -3249,6 +3248,37 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
+	const struct phm_set_power_state_input *states =
+			(const struct phm_set_power_state_input *)input;
+	const struct vega10_power_state *vega10_ps =
+			cast_const_phw_vega10_power_state(states->pnew_state);
+	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+	uint32_t sclk = vega10_ps->performance_levels
+			[vega10_ps->performance_level_count - 1].gfx_clock;
+	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+	uint32_t mclk = vega10_ps->performance_levels
+			[vega10_ps->performance_level_count - 1].mem_clock;
+	uint32_t i;
+
+	for (i = 0; i < sclk_table->count; i++) {
+		if (sclk == sclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= sclk_table->count) {
+		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+		sclk_table->dpm_levels[i-1].value = sclk;
+	}
+
+	for (i = 0; i < mclk_table->count; i++) {
+		if (mclk == mclk_table->dpm_levels[i].value)
+			break;
+	}
+
+	if (i >= mclk_table->count) {
+		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+		mclk_table->dpm_levels[i-1].value = mclk;
+	}
 
 	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
 		data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
@@ -4529,11 +4559,13 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
 
 	if (vega10_ps->performance_levels
 			[vega10_ps->performance_level_count - 1].gfx_clock >
-			hwmgr->platform_descriptor.overdriveLimit.engineClock)
+			hwmgr->platform_descriptor.overdriveLimit.engineClock) {
 		vega10_ps->performance_levels
 		[vega10_ps->performance_level_count - 1].gfx_clock =
 				hwmgr->platform_descriptor.overdriveLimit.engineClock;
-
+		pr_warn("max sclk supported by vbios is %d\n",
+				hwmgr->platform_descriptor.overdriveLimit.engineClock);
+	}
 	return 0;
 }
 
@@ -4581,10 +4613,13 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
 
 	if (vega10_ps->performance_levels
 			[vega10_ps->performance_level_count - 1].mem_clock >
-			hwmgr->platform_descriptor.overdriveLimit.memoryClock)
+			hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
 		vega10_ps->performance_levels
 		[vega10_ps->performance_level_count - 1].mem_clock =
 				hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+		pr_warn("max mclk supported by vbios is %d\n",
+				hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+	}
 
 	return 0;
 }

+ 8 - 0
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c

@@ -2356,6 +2356,13 @@ static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
 		return vega12_disable_gfx_off(hwmgr);
 }
 
+static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level)
+{
+	return 0;
+}
+
 static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.backend_init = vega12_hwmgr_backend_init,
 	.backend_fini = vega12_hwmgr_backend_fini,
@@ -2406,6 +2413,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
 	.register_irq_handlers = smu9_register_irq_handlers,
 	.start_thermal_controller = vega12_start_thermal_controller,
 	.powergate_gfx = vega12_gfx_off_control,
+	.get_performance_level = vega12_get_performance_level,
 };
 
 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)

+ 45 - 40
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c

@@ -1875,38 +1875,20 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 	return ret;
 }
 
-static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
+static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
+		PPCLK_e clk_id, uint32_t *clk_freq)
 {
-	uint32_t gfx_clk = 0;
 	int ret = 0;
 
-	*gfx_freq = 0;
+	*clk_freq = 0;
 
 	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
-			"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
+			PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+			"[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
 			return ret);
-	gfx_clk = smum_get_argument(hwmgr);
+	*clk_freq = smum_get_argument(hwmgr);
 
-	*gfx_freq = gfx_clk * 100;
-
-	return 0;
-}
-
-static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
-{
-	uint32_t mem_clk = 0;
-	int ret = 0;
-
-	*mclk_freq = 0;
-
-	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
-			"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
-			return ret);
-	mem_clk = smum_get_argument(hwmgr);
-
-	*mclk_freq = mem_clk * 100;
+	*clk_freq = *clk_freq * 100;
 
 	return 0;
 }
@@ -1937,12 +1919,16 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
 	switch (idx) {
 	case AMDGPU_PP_SENSOR_GFX_SCLK:
-		ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
+		ret = vega20_get_current_clk_freq(hwmgr,
+				PPCLK_GFXCLK,
+				(uint32_t *)value);
 		if (!ret)
 			*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_GFX_MCLK:
-		ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value);
+		ret = vega20_get_current_clk_freq(hwmgr,
+				PPCLK_UCLK,
+				(uint32_t *)value);
 		if (!ret)
 			*size = 4;
 		break;
@@ -2012,7 +1998,6 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
 		switch (clk_type) {
 		case amd_pp_dcef_clock:
-			clk_freq = clock_req->clock_freq_in_khz / 100;
 			clk_select = PPCLK_DCEFCLK;
 			break;
 		case amd_pp_disp_clock:
@@ -2041,11 +2026,20 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 	return result;
 }
 
+static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+				PHM_PerformanceLevelDesignation designation, uint32_t index,
+				PHM_PerformanceLevel *level)
+{
+	return 0;
+}
+
 static int vega20_notify_smc_display_config_after_ps_adjustment(
 		struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data =
 			(struct vega20_hwmgr *)(hwmgr->backend);
+	struct vega20_single_dpm_table *dpm_table =
+			&data->dpm_table.mem_table;
 	struct PP_Clocks min_clocks = {0};
 	struct pp_display_clock_request clock_req;
 	int ret = 0;
@@ -2063,7 +2057,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
 
 	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
 		clock_req.clock_type = amd_pp_dcef_clock;
-		clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+		clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
 		if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
 			if (data->smu_features[GNLD_DS_DCEFCLK].supported)
 				PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
@@ -2076,6 +2070,15 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
 		}
 	}
 
+	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+		dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
+		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_SetHardMinByFreq,
+				(PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+				"[SetHardMinFreq] Set hard min uclk failed!",
+				return ret);
+	}
+
 	return 0;
 }
 
@@ -2353,7 +2356,7 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
 
 	for (i = 0; i < count; i++) {
 		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 		clocks->data[i].latency_in_us = 0;
 	}
 
@@ -2383,7 +2386,7 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
 	for (i = 0; i < count; i++) {
 		clocks->data[i].clocks_in_khz =
 			data->mclk_latency_table.entries[i].frequency =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 		clocks->data[i].latency_in_us =
 			data->mclk_latency_table.entries[i].latency =
 			vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -2408,7 +2411,7 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
 
 	for (i = 0; i < count; i++) {
 		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 		clocks->data[i].latency_in_us = 0;
 	}
 
@@ -2431,7 +2434,7 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
 
 	for (i = 0; i < count; i++) {
 		clocks->data[i].clocks_in_khz =
-			dpm_table->dpm_levels[i].value * 100;
+			dpm_table->dpm_levels[i].value * 1000;
 		clocks->data[i].latency_in_us = 0;
 	}
 
@@ -2582,11 +2585,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
 				return -EINVAL;
 			}
 
-			if (input_clk < clocks.data[0].clocks_in_khz / 100 ||
+			if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
 			    input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
 				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
 					input_clk,
-					clocks.data[0].clocks_in_khz / 100,
+					clocks.data[0].clocks_in_khz / 1000,
 					od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
 				return -EINVAL;
 			}
@@ -2726,7 +2729,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 	switch (type) {
 	case PP_SCLK:
-		ret = vega20_get_current_gfx_clk_freq(hwmgr, &now);
+		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
 		PP_ASSERT_WITH_CODE(!ret,
 				"Attempt to get current gfx clk Failed!",
 				return ret);
@@ -2738,12 +2741,12 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
-				i, clocks.data[i].clocks_in_khz / 100,
+				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
 		break;
 
 	case PP_MCLK:
-		ret = vega20_get_current_mclk_freq(hwmgr, &now);
+		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
 		PP_ASSERT_WITH_CODE(!ret,
 				"Attempt to get current mclk freq Failed!",
 				return ret);
@@ -2755,7 +2758,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
 		for (i = 0; i < clocks.num_levels; i++)
 			size += sprintf(buf + size, "%d: %uMhz %s\n",
-				i, clocks.data[i].clocks_in_khz / 100,
+				i, clocks.data[i].clocks_in_khz / 1000,
 				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
 		break;
 
@@ -2820,7 +2823,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 					return ret);
 
 			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
-				clocks.data[0].clocks_in_khz / 100,
+				clocks.data[0].clocks_in_khz / 1000,
 				od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
 		}
 
@@ -3476,6 +3479,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
 		vega20_set_watermarks_for_clocks_ranges,
 	.display_clock_voltage_request =
 		vega20_display_clock_voltage_request,
+	.get_performance_level =
+		vega20_get_performance_level,
 	/* UMD pstate, profile related */
 	.force_dpm_level =
 		vega20_dpm_force_dpm_level,

+ 25 - 31
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c

@@ -642,8 +642,14 @@ static int check_powerplay_tables(
 		"Unsupported PPTable format!", return -1);
 	PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
 		"Invalid PowerPlay Table!", return -1);
-	PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION,
-		"Unmatch PPTable version, vbios update may be needed!", return -1);
+
+	if (powerplay_table->smcPPTable.Version != PPTABLE_V20_SMU_VERSION) {
+		pr_info("Unmatch PPTable version: "
+			"pptable from VBIOS is V%d while driver supported is V%d!",
+			powerplay_table->smcPPTable.Version,
+			PPTABLE_V20_SMU_VERSION);
+		return -EINVAL;
+	}
 
 	//dump_pptable(&powerplay_table->smcPPTable);
 
@@ -716,10 +722,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 		"[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
 		return -1);
 
-	memset(ppsmc_pptable->Padding32,
-			0,
-			sizeof(struct atom_smc_dpm_info_v4_4) -
-			sizeof(struct atom_common_table_header));
 	ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
 	ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
 
@@ -778,22 +780,19 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
 	ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
 	ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
 
-	if ((smc_dpm_table->table_header.format_revision == 4) &&
-	    (smc_dpm_table->table_header.content_revision == 4)) {
-		for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
-			ppsmc_pptable->I2cControllers[i].Enabled =
-				smc_dpm_table->i2ccontrollers[i].enabled;
-			ppsmc_pptable->I2cControllers[i].SlaveAddress =
-				smc_dpm_table->i2ccontrollers[i].slaveaddress;
-			ppsmc_pptable->I2cControllers[i].ControllerPort =
-				smc_dpm_table->i2ccontrollers[i].controllerport;
-			ppsmc_pptable->I2cControllers[i].ThermalThrottler =
-				smc_dpm_table->i2ccontrollers[i].thermalthrottler;
-			ppsmc_pptable->I2cControllers[i].I2cProtocol =
-				smc_dpm_table->i2ccontrollers[i].i2cprotocol;
-			ppsmc_pptable->I2cControllers[i].I2cSpeed =
-				smc_dpm_table->i2ccontrollers[i].i2cspeed;
-		}
+	for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+		ppsmc_pptable->I2cControllers[i].Enabled =
+			smc_dpm_table->i2ccontrollers[i].enabled;
+		ppsmc_pptable->I2cControllers[i].SlaveAddress =
+			smc_dpm_table->i2ccontrollers[i].slaveaddress;
+		ppsmc_pptable->I2cControllers[i].ControllerPort =
+			smc_dpm_table->i2ccontrollers[i].controllerport;
+		ppsmc_pptable->I2cControllers[i].ThermalThrottler =
+			smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+		ppsmc_pptable->I2cControllers[i].I2cProtocol =
+			smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+		ppsmc_pptable->I2cControllers[i].I2cSpeed =
+			smc_dpm_table->i2ccontrollers[i].i2cspeed;
 	}
 
 	return 0;
@@ -882,15 +881,10 @@ static int init_powerplay_table_information(
 	if (pptable_information->smc_pptable == NULL)
 		return -ENOMEM;
 
-	if (powerplay_table->smcPPTable.Version <= 2)
-		memcpy(pptable_information->smc_pptable,
-				&(powerplay_table->smcPPTable),
-				sizeof(PPTable_t) -
-				sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT);
-	else
-		memcpy(pptable_information->smc_pptable,
-				&(powerplay_table->smcPPTable),
-				sizeof(PPTable_t));
+	memcpy(pptable_information->smc_pptable,
+			&(powerplay_table->smcPPTable),
+			sizeof(PPTable_t));
+
 
 	result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
 

+ 1 - 1
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h

@@ -29,7 +29,7 @@
 // any structure is changed in this file
 #define SMU11_DRIVER_IF_VERSION 0x12
 
-#define PPTABLE_V20_SMU_VERSION 2
+#define PPTABLE_V20_SMU_VERSION 3
 
 #define NUM_GFXCLK_DPM_LEVELS  16
 #define NUM_VCLK_DPM_LEVELS    8

+ 4 - 0
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c

@@ -71,7 +71,11 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
 	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
 					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
 	if (result != 0) {
+		/* Read the last message to SMU, to report actual cause */
+		uint32_t val = cgs_read_register(hwmgr->device,
+						 mmSMU_MP1_SRBM2P_MSG_0);
 		pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
+		pr_err("SMU still servicing msg (0x%04x)\n", val);
 		return result;
 	}
 

+ 16 - 13
drivers/gpu/drm/bridge/ti-sn65dsi86.c

@@ -458,18 +458,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
 	unsigned int val;
 	int ret;
 
-	/*
-	 * FIXME:
-	 * This 70ms was found necessary by experimentation. If it's not
-	 * present, link training fails. It seems like it can go anywhere from
-	 * pre_enable() up to semi-auto link training initiation below.
-	 *
-	 * Neither the datasheet for the bridge nor the panel tested mention a
-	 * delay of this magnitude in the timing requirements. So for now, add
-	 * the mystery delay until someone figures out a better fix.
-	 */
-	msleep(70);
-
 	/* DSI_A lane config */
 	val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
 	regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
@@ -536,7 +524,22 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
 	/* configure bridge ref_clk */
 	ti_sn_bridge_set_refclk_freq(pdata);
 
-	/* in case drm_panel is connected then HPD is not supported */
+	/*
+	 * HPD on this bridge chip is a bit useless.  This is an eDP bridge
+	 * so the HPD is an internal signal that's only there to signal that
+	 * the panel is done powering up.  ...but the bridge chip debounces
+	 * this signal by between 100 ms and 400 ms (depending on process,
+	 * voltage, and temperate--I measured it at about 200 ms).  One
+	 * particular panel asserted HPD 84 ms after it was powered on meaning
+	 * that we saw HPD 284 ms after power on.  ...but the same panel said
+	 * that instead of looking at HPD you could just hardcode a delay of
+	 * 200 ms.  We'll assume that the panel driver will have the hardcoded
+	 * delay in its prepare and always disable HPD.
+	 *
+	 * If HPD somehow makes sense on some future panel we'll have to
+	 * change this to be conditional on someone specifying that HPD should
+	 * be used.
+	 */
 	regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
 			   HPD_DISABLE);
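
With HPD now disabled unconditionally at the bridge, the wait for the panel to become ready moves into the panel driver via the "no-hpd" property and the hpd_absent_delay added to panel-simple later in this commit. A rough sketch of the resulting prepare-path arithmetic, assuming the Innolux panel's 200 ms hpd_absent_delay and a zero base prepare delay (both taken from the panel-simple hunks below):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int prepare = 0;		/* desc->delay.prepare */
	unsigned int hpd_absent_delay = 200;	/* worst case from the panel spec */
	bool no_hpd = true;			/* DT "no-hpd" property present */
	unsigned int delay = prepare;

	if (no_hpd)
		delay += hpd_absent_delay;

	/* A fixed 200 ms sleep in prepare() takes the place of both the
	 * removed 70 ms mystery delay and any attempt to watch HPD through
	 * the bridge's 100-400 ms debounce. */
	printf("prepare sleep: %u ms\n", delay);
	return 0;
}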
 

+ 20 - 1
drivers/gpu/drm/drm_atomic_helper.c

@@ -308,6 +308,26 @@ update_connector_routing(struct drm_atomic_state *state,
 		return 0;
 	}
 
+	crtc_state = drm_atomic_get_new_crtc_state(state,
+						   new_connector_state->crtc);
+	/*
+	 * For compatibility with legacy users, we want to make sure that
+	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
+	 * which would result in anything else must be considered invalid, to
+	 * avoid turning on new displays on dead connectors.
+	 *
+	 * Since the connector can be unregistered at any point during an
+	 * atomic check or commit, this is racy. But that's OK: all we care
+	 * about is ensuring that userspace can't do anything but shut off the
+	 * display on a connector that was destroyed after its been notified,
+	 * not before.
+	 */
+	if (drm_connector_is_unregistered(connector) && crtc_state->active) {
+		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
+				 connector->base.id, connector->name);
+		return -EINVAL;
+	}
+
 	funcs = connector->helper_private;
 
 	if (funcs->atomic_best_encoder)
@@ -352,7 +372,6 @@ update_connector_routing(struct drm_atomic_state *state,
 
 	set_best_encoder(state, new_connector_state, new_encoder);
 
-	crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
 	crtc_state->connectors_changed = true;
 
 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",

+ 6 - 5
drivers/gpu/drm/drm_connector.c

@@ -379,7 +379,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	/* The connector should have been removed from userspace long before
 	 * it is finally destroyed.
 	 */
-	if (WARN_ON(connector->registered))
+	if (WARN_ON(connector->registration_state ==
+		    DRM_CONNECTOR_REGISTERED))
 		drm_connector_unregister(connector);
 
 	if (connector->tile_group) {
@@ -436,7 +437,7 @@ int drm_connector_register(struct drm_connector *connector)
 		return 0;
 
 	mutex_lock(&connector->mutex);
-	if (connector->registered)
+	if (connector->registration_state != DRM_CONNECTOR_INITIALIZING)
 		goto unlock;
 
 	ret = drm_sysfs_connector_add(connector);
@@ -456,7 +457,7 @@ int drm_connector_register(struct drm_connector *connector)
 
 	drm_mode_object_register(connector->dev, &connector->base);
 
-	connector->registered = true;
+	connector->registration_state = DRM_CONNECTOR_REGISTERED;
 	goto unlock;
 
 err_debugfs:
@@ -478,7 +479,7 @@ EXPORT_SYMBOL(drm_connector_register);
 void drm_connector_unregister(struct drm_connector *connector)
 {
 	mutex_lock(&connector->mutex);
-	if (!connector->registered) {
+	if (connector->registration_state != DRM_CONNECTOR_REGISTERED) {
 		mutex_unlock(&connector->mutex);
 		return;
 	}
@@ -489,7 +490,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 	drm_sysfs_connector_remove(connector);
 	drm_debugfs_connector_remove(connector);
 
-	connector->registered = false;
+	connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
 	mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);

+ 3 - 0
drivers/gpu/drm/drm_edid.c

@@ -122,6 +122,9 @@ static const struct edid_quirk {
 	/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
 	{ "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
 
+	/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
+	{ "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+
 	/* Belinea 10 15 55 */
 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },

+ 7 - 13
drivers/gpu/drm/i915/intel_dp.c

@@ -5102,19 +5102,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
 		 */
 		status = connector_status_disconnected;
 		goto out;
-	} else {
-		/*
-		 * If display is now connected check links status,
-		 * there has been known issues of link loss triggering
-		 * long pulse.
-		 *
-		 * Some sinks (eg. ASUS PB287Q) seem to perform some
-		 * weird HPD ping pong during modesets. So we can apparently
-		 * end up with HPD going low during a modeset, and then
-		 * going back up soon after. And once that happens we must
-		 * retrain the link to get a picture. That's in case no
-		 * userspace component reacted to intermittent HPD dip.
-		 */
+	}
+
+	/*
+	 * Some external monitors do not signal loss of link synchronization
+	 * with an IRQ_HPD, so force a link status check.
+	 */
+	if (!intel_dp_is_edp(intel_dp)) {
 		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 
 		intel_dp_retrain_link(encoder, ctx);

+ 4 - 4
drivers/gpu/drm/i915/intel_dp_mst.c

@@ -77,7 +77,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	pipe_config->pbn = mst_pbn;
 
 	/* Zombie connectors can't have VCPI slots */
-	if (READ_ONCE(connector->registered)) {
+	if (!drm_connector_is_unregistered(connector)) {
 		slots = drm_dp_atomic_find_vcpi_slots(state,
 						      &intel_dp->mst_mgr,
 						      port,
@@ -313,7 +313,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
 	struct edid *edid;
 	int ret;
 
-	if (!READ_ONCE(connector->registered))
+	if (drm_connector_is_unregistered(connector))
 		return intel_connector_update_modes(connector, NULL);
 
 	edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -329,7 +329,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_dp *intel_dp = intel_connector->mst_port;
 
-	if (!READ_ONCE(connector->registered))
+	if (drm_connector_is_unregistered(connector))
 		return connector_status_disconnected;
 	return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
 				      intel_connector->port);
@@ -372,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
 	int bpp = 24; /* MST uses fixed bpp */
 	int max_rate, mode_rate, max_lanes, max_link_clock;
 
-	if (!READ_ONCE(connector->registered))
+	if (drm_connector_is_unregistered(connector))
 		return MODE_ERROR;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)

+ 4 - 10
drivers/gpu/drm/nouveau/dispnv50/disp.c

@@ -881,22 +881,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
 {
 	struct nv50_head *head = nv50_head(connector_state->crtc);
 	struct nv50_mstc *mstc = nv50_mstc(connector);
-	if (mstc->port) {
-		struct nv50_mstm *mstm = mstc->mstm;
-		return &mstm->msto[head->base.index]->encoder;
-	}
-	return NULL;
+
+	return &mstc->mstm->msto[head->base.index]->encoder;
 }
 
 static struct drm_encoder *
 nv50_mstc_best_encoder(struct drm_connector *connector)
 {
 	struct nv50_mstc *mstc = nv50_mstc(connector);
-	if (mstc->port) {
-		struct nv50_mstm *mstm = mstc->mstm;
-		return &mstm->msto[0]->encoder;
-	}
-	return NULL;
+
+	return &mstc->mstm->msto[0]->encoder;
 }
 
 static enum drm_mode_status

+ 20 - 9
drivers/gpu/drm/panel/panel-simple.c

@@ -56,6 +56,8 @@ struct panel_desc {
 	/**
 	 * @prepare: the time (in milliseconds) that it takes for the panel to
 	 *           become ready and start receiving video data
+	 * @hpd_absent_delay: Add this to the prepare delay if we know Hot
+	 *                    Plug Detect isn't used.
 	 * @enable: the time (in milliseconds) that it takes for the panel to
 	 *          display the first valid frame after starting to receive
 	 *          video data
@@ -66,6 +68,7 @@ struct panel_desc {
 	 */
 	struct {
 		unsigned int prepare;
+		unsigned int hpd_absent_delay;
 		unsigned int enable;
 		unsigned int disable;
 		unsigned int unprepare;
@@ -79,6 +82,7 @@ struct panel_simple {
 	struct drm_panel base;
 	bool prepared;
 	bool enabled;
+	bool no_hpd;
 
 	const struct panel_desc *desc;
 
@@ -202,6 +206,7 @@ static int panel_simple_unprepare(struct drm_panel *panel)
 static int panel_simple_prepare(struct drm_panel *panel)
 {
 	struct panel_simple *p = to_panel_simple(panel);
+	unsigned int delay;
 	int err;
 
 	if (p->prepared)
@@ -215,8 +220,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
 
 	gpiod_set_value_cansleep(p->enable_gpio, 1);
 
-	if (p->desc->delay.prepare)
-		msleep(p->desc->delay.prepare);
+	delay = p->desc->delay.prepare;
+	if (p->no_hpd)
+		delay += p->desc->delay.hpd_absent_delay;
+	if (delay)
+		msleep(delay);
 
 	p->prepared = true;
 
@@ -305,6 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 	panel->prepared = false;
 	panel->desc = desc;
 
+	panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+
 	panel->supply = devm_regulator_get(dev, "power");
 	if (IS_ERR(panel->supply))
 		return PTR_ERR(panel->supply);
@@ -1363,7 +1373,7 @@ static const struct panel_desc innolux_n156bge_l21 = {
 	},
 };
 
-static const struct drm_display_mode innolux_tv123wam_mode = {
+static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
 	.clock = 206016,
 	.hdisplay = 2160,
 	.hsync_start = 2160 + 48,
@@ -1377,15 +1387,16 @@ static const struct drm_display_mode innolux_tv123wam_mode = {
 	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
 };
 
-static const struct panel_desc innolux_tv123wam = {
-	.modes = &innolux_tv123wam_mode,
+static const struct panel_desc innolux_p120zdg_bf1 = {
+	.modes = &innolux_p120zdg_bf1_mode,
 	.num_modes = 1,
 	.bpc = 8,
 	.size = {
-		.width = 259,
-		.height = 173,
+		.width = 254,
+		.height = 169,
 	},
 	.delay = {
+		.hpd_absent_delay = 200,
 		.unprepare = 500,
 	},
 };
@@ -2445,8 +2456,8 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "innolux,n156bge-l21",
 		.data = &innolux_n156bge_l21,
 	}, {
-		.compatible = "innolux,tv123wam",
-		.data = &innolux_tv123wam,
+		.compatible = "innolux,p120zdg-bf1",
+		.data = &innolux_p120zdg_bf1,
 	}, {
 		.compatible = "innolux,zj070na-01p",
 		.data = &innolux_zj070na_01p,

+ 69 - 2
include/drm/drm_connector.h

@@ -82,6 +82,53 @@ enum drm_connector_status {
 	connector_status_unknown = 3,
 };
 
+/**
+ * enum drm_connector_registration_status - userspace registration status for
+ * a &drm_connector
+ *
+ * This enum is used to track the status of initializing a connector and
+ * registering it with userspace, so that DRM can prevent bogus modesets on
+ * connectors that no longer exist.
+ */
+enum drm_connector_registration_state {
+	/**
+	 * @DRM_CONNECTOR_INITIALIZING: The connector has just been created,
+	 * but has yet to be exposed to userspace. There should be no
+	 * additional restrictions to how the state of this connector may be
+	 * modified.
+	 */
+	DRM_CONNECTOR_INITIALIZING = 0,
+
+	/**
+	 * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized
+	 * and registered with sysfs, as such it has been exposed to
+	 * userspace. There should be no additional restrictions to how the
+	 * state of this connector may be modified.
+	 */
+	DRM_CONNECTOR_REGISTERED = 1,
+
+	/**
+	 * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed
+	 * to userspace and has since been unregistered and removed from
+	 * userspace, or the connector was unregistered before it had a chance
+	 * to be exposed to userspace (e.g. still in the
+	 * @DRM_CONNECTOR_INITIALIZING state). When a connector is
+	 * unregistered, there are additional restrictions to how its state
+	 * may be modified:
+	 *
+	 * - An unregistered connector may only have its DPMS changed from
+	 *   On->Off. Once DPMS is changed to Off, it may not be switched back
+	 *   to On.
+	 * - Modesets are not allowed on unregistered connectors, unless they
+	 *   would result in disabling its assigned CRTCs. This means
+	 *   disabling a CRTC on an unregistered connector is OK, but enabling
+	 *   one is not.
+	 * - Removing a CRTC from an unregistered connector is OK, but new
+	 *   CRTCs may never be assigned to an unregistered connector.
+	 */
+	DRM_CONNECTOR_UNREGISTERED = 2,
+};
+
 enum subpixel_order {
 	SubPixelUnknown = 0,
 	SubPixelHorizontalRGB,
@@ -853,10 +900,12 @@ struct drm_connector {
 	bool ycbcr_420_allowed;
 
 	/**
-	 * @registered: Is this connector exposed (registered) with userspace?
+	 * @registration_state: Is this connector initializing, exposed
+	 * (registered) with userspace, or unregistered?
+	 *
 	 * Protected by @mutex.
 	 */
-	bool registered;
+	enum drm_connector_registration_state registration_state;
 
 	/**
 	 * @modes:
@@ -1166,6 +1215,24 @@ static inline void drm_connector_unreference(struct drm_connector *connector)
 	drm_connector_put(connector);
 }
 
+/**
+ * drm_connector_is_unregistered - has the connector been unregistered from
+ * userspace?
+ * @connector: DRM connector
+ *
+ * Checks whether or not @connector has been unregistered from userspace.
+ *
+ * Returns:
+ * True if the connector was unregistered, false if the connector is
+ * registered or has not yet been registered with userspace.
+ */
+static inline bool
+drm_connector_is_unregistered(struct drm_connector *connector)
+{
+	return READ_ONCE(connector->registration_state) ==
+		DRM_CONNECTOR_UNREGISTERED;
+}
+
 const char *drm_get_connector_status_name(enum drm_connector_status status);
 const char *drm_get_subpixel_order_name(enum subpixel_order order);
 const char *drm_get_dpms_name(int val);
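
The kerneldoc above spells out the policy; in driver code the check boils down to one helper call, as the i915 MST hunks earlier in this commit show. A minimal illustrative sketch of a hypothetical ->detect() hook (not part of this commit):

#include <drm/drm_connector.h>

/* Hypothetical callback: once the connector has been unregistered from
 * userspace, report it as disconnected so the only remaining action is
 * shutting the display off. */
static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	/* normal hardware-specific detection would go here */
	return connector_status_connected;
}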