
Merged TI feature audio-display into ti-linux-4.19.y

TI-Feature: audio-display
TI-Branch: audio_display-ti-linux-4.19.y

* 'audio_display-ti-linux-4.19.y' of ssh://bitbucket.itg.ti.com/lcpdpublicdom/audio-display: (29 commits)
  drm/tidss: dispc7: Change all const static to static const to silence W=1
  drm/tidss: dispc7: Fix W=1 warnings from dispc6_vp_mode_valid()
  drm/tidss: dispc6: Remove unsused fir coefs to fix W=1 warnings
  drm/tidss: dispc6: Fix W=1 warnings from dispc6_vp_mode_valid()
  drm/tidss: scale_coefs: Remove unused coefs_null to silence W=1
  drm/tidss: WB: fix error reporting in tidss_wb_init
  drm/tidss: WB: remove unnecessary kernel trace
  media: i2c: ov5640: fix potential null pointer dereference
  drm/tidss: dispc7: Drop redundant and uneeded max_pclk and min_pclk
  drm/tidss: dispc7: Implement VP bus format specific limits
  drm/tidss: dispc7: More explicit enum and struct member name
  drm/tidss: dispc7: Ensure output width is divisible by 2
  drm/bridge: cdns-mhdp: Check link status if HPD interrupt is received
  drm/bridge: cdns-mhdp: Protect firmware mailbox messaging with mutex
  drm/bridge: cdns-mhdp: Add missing resource deallocations
  drm/bridge: cdns-mhdp: Print error if DP link BW isn't enough for mode
  drm/bridge: cdns-mhdp: Add simple mode_valid()
  drm/bridge: sii902x: fix missing static
  drm/omap: hdmi4: fix use of uninitialized var
  drm/tidss: cleanup dma related settings
  ...

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
commit 81488ea1dc

+ 1 - 0
Documentation/devicetree/bindings/display/bridge/cdns,mhdp.txt

@@ -5,6 +5,7 @@ The Cadence MHDP bridge is a DPI to DP bridge.
 
 Required properties:
 - compatible: should be "cdns,mhdp8546",
+  Use "ti,j721e-mhdp8546" for TI J7 SoCs.
 - reg: physical base address and length of the controller's registers,
 - clocks: DP bridge clock, it's used by the IP to know how to translate
 	a number of clock cycles into a time (which is used to comply

+ 3 - 0
Documentation/devicetree/bindings/phy/phy-cadence-dp.txt

@@ -7,6 +7,9 @@ the Cadence MHDP DisplayPort controller.
 -------------------------------------------------------------------------------
 Required properties (controller (parent) node):
 - compatible	: Should be "cdns,dp-phy"
+- clocks	: PHY reference clock. Must contain an entry in clock-names.
+		  See ../clocks/clock-bindings.txt for details.
+- clock-names	: Must be "refclk"
 - reg		: Defines the following sets of registers in the parent
 		  mhdp device:
 			- Offset of the DPTX PHY configuration registers

+ 3 - 1
arch/arm64/boot/dts/ti/k3-j721e-main.dtsi

@@ -940,11 +940,13 @@
 			num_lanes = <4>;
 			max_bit_rate = <5400>;
 			#phy-cells = <0>;
+			clocks = <&wiz4_pll0_refclk>;
+			clock-names = "refclk";
 		};
 	};
 
 	mhdp: dp-bridge@000A000000 {
-		compatible = "cdns,mhdp8546";
+		compatible = "ti,j721e-mhdp8546", "cdns,mhdp8546";
 		reg = <0x00 0x0A000000 0x0 0x30A00>, /* DSS_EDP0_V2A_CORE_VP_REGS_APB - upto PHY mapped area */
 		      <0x00 0x04F40000 0x0 0x20>;    /* DSS_EDP0_INTG_CFG_VP */
 

+ 232 - 41
drivers/gpu/drm/bridge/cdns-mhdp.c

@@ -31,6 +31,7 @@
 
 #include <linux/irq.h>
 #include <linux/of_irq.h>
+#include <linux/of_device.h>
 
 #include <asm/unaligned.h>
 
@@ -84,6 +85,11 @@
 #define FW_STANDBY				0
 #define FW_ACTIVE				1
 
+#define DPTX_READ_EVENT_HPD_TO_HIGH            BIT(0)
+#define DPTX_READ_EVENT_HPD_TO_LOW             BIT(1)
+#define DPTX_READ_EVENT_HPD_PULSE              BIT(2)
+#define DPTX_READ_EVENT_HPD_STATE              BIT(3)
+
 static inline u32 get_unaligned_be24(const void *p)
 {
 	const u8 *_p = p;
@@ -104,6 +110,8 @@ static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
 {
 	int val, ret;
 
+	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
 	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
 				 val, !val, MAILBOX_RETRY_US,
 				 MAILBOX_TIMEOUT_US);
@@ -117,6 +125,8 @@ static int cdp_dp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
 {
 	int ret, full;
 
+	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
 	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
 				 full, !full, MAILBOX_RETRY_US,
 				 MAILBOX_TIMEOUT_US);
@@ -218,6 +228,8 @@ int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
 
 	put_unaligned_be32(addr, msg);
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
 				     GENERAL_REGISTER_READ,
 				     sizeof(msg), msg);
@@ -243,6 +255,7 @@ int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
 	*value = get_unaligned_be32(resp + 4);
 
 err_reg_read:
+	mutex_unlock(&mhdp->mbox_mutex);
 	if (ret) {
 		DRM_DEV_ERROR(mhdp->dev, "Failed to read register.\n");
 		*value = 0;
@@ -255,12 +268,19 @@ static
 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
 {
 	u8 msg[6];
+	int ret;
 
 	put_unaligned_be16(addr, msg);
 	put_unaligned_be32(val, msg + 2);
 
-	return cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-				      DPTX_WRITE_REGISTER, sizeof(msg), msg);
+	mutex_lock(&mhdp->mbox_mutex);
+
+	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+				     DPTX_WRITE_REGISTER, sizeof(msg), msg);
+
+	mutex_unlock(&mhdp->mbox_mutex);
+
+	return ret;
 }
 
 static
@@ -268,14 +288,21 @@ int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
 			    u8 start_bit, u8 bits_no, u32 val)
 {
 	u8 field[8];
+	int ret;
 
 	put_unaligned_be16(addr, field);
 	field[2] = start_bit;
 	field[3] = bits_no;
 	put_unaligned_be32(val, field + 4);
 
-	return cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
-				      DPTX_WRITE_FIELD, sizeof(field), field);
+	mutex_lock(&mhdp->mbox_mutex);
+
+	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+				     DPTX_WRITE_FIELD, sizeof(field), field);
+
+	mutex_unlock(&mhdp->mbox_mutex);
+
+	return ret;
 }
 
 static
@@ -288,6 +315,8 @@ int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
 	put_unaligned_be16(len, msg);
 	put_unaligned_be24(addr, msg + 2);
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
 				     DPTX_READ_DPCD, sizeof(msg), msg);
 	if (ret)
@@ -306,6 +335,8 @@ int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
 	ret = cdns_mhdp_mailbox_read_receive(mhdp, data, len);
 
 err_dpcd_read:
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	return ret;
 }
 
@@ -319,6 +350,8 @@ int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
 	put_unaligned_be24(addr, msg + 2);
 	msg[5] = value;
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
 				     DPTX_WRITE_DPCD, sizeof(msg), msg);
 	if (ret)
@@ -337,6 +370,8 @@ int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
 		ret = -EINVAL;
 
 err_dpcd_write:
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	if (ret)
 		DRM_DEV_ERROR(mhdp->dev, "dpcd write failed: %d\n", ret);
 	return ret;
@@ -354,6 +389,8 @@ int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
 	msg[3] = 1;
 	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	for (i = 0; i < sizeof(msg); i++) {
 		ret = cdp_dp_mailbox_write(mhdp, msg[i]);
 		if (ret)
@@ -372,6 +409,8 @@ int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
 	ret = 0;
 
 err_set_firmware_active:
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	if (ret < 0)
 		DRM_DEV_ERROR(mhdp->dev, "set firmware active failed\n");
 	return ret;
@@ -383,6 +422,8 @@ int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
 	u8 status;
 	int ret;
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
 				     DPTX_HPD_STATE, 0, NULL);
 	if (ret)
@@ -398,9 +439,13 @@ int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
 	if (ret)
 		goto err_get_hpd;
 
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	return status;
 
 err_get_hpd:
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	DRM_DEV_ERROR(mhdp->dev, "get hpd status failed: %d\n", ret);
 	return ret;
 }
@@ -413,6 +458,8 @@ int cdns_mhdp_get_edid_block(void *data, u8 *edid,
 	u8 msg[2], reg[2], i;
 	int ret;
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	for (i = 0; i < 4; i++) {
 		msg[0] = block / 2;
 		msg[1] = block % 2;
@@ -441,6 +488,8 @@ int cdns_mhdp_get_edid_block(void *data, u8 *edid,
 			break;
 	}
 
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	if (ret)
 		DRM_DEV_ERROR(mhdp->dev, "get block[%d] edid failed: %d\n",
 			      block, ret);
@@ -448,26 +497,31 @@ int cdns_mhdp_get_edid_block(void *data, u8 *edid,
 	return ret;
 }
 
-static __maybe_unused
+static
 int cdns_mhdp_read_event(struct cdns_mhdp_device *mhdp)
 {
 	u8 event = 0;
 	int ret;
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
 				     DPTX_READ_EVENT, 0, NULL);
 	if (ret)
-		return ret;
+		goto out;
 
 	ret = cdns_mhdp_mailbox_validate_receive(mhdp,
 						 MB_MODULE_ID_DP_TX,
 						 DPTX_READ_EVENT,
 						 sizeof(event));
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	ret = cdns_mhdp_mailbox_read_receive(mhdp, &event,
 					     sizeof(event));
+out:
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	if (ret < 0)
 		return ret;
 
@@ -476,7 +530,7 @@ int cdns_mhdp_read_event(struct cdns_mhdp_device *mhdp)
 
 static
 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
-			u8 nlanes, u16 udelay, u8 *lanes_data, u8 *dpcd)
+			u8 nlanes, u16 udelay, u8 *lanes_data, u8 *link_status)
 {
 	u8 payload[7];
 	u8 hdr[5]; /* For DPCD read response header */
@@ -495,6 +549,8 @@ int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
 	put_unaligned_be16(udelay, payload + 1);
 	memcpy(payload + 3, lanes_data, nlanes);
 
+	mutex_lock(&mhdp->mbox_mutex);
+
 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
 				     DPTX_ADJUST_LT,
 				     sizeof(payload), payload);
@@ -516,9 +572,11 @@ int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp,
 	if (addr != DP_LANE0_1_STATUS)
 		goto err_adjust_lt;
 
-	ret = cdns_mhdp_mailbox_read_receive(mhdp, dpcd, nregs);
+	ret = cdns_mhdp_mailbox_read_receive(mhdp, link_status, nregs);
 
 err_adjust_lt:
+	mutex_unlock(&mhdp->mbox_mutex);
+
 	if (ret)
 		DRM_DEV_ERROR(mhdp->dev, "Failed to adjust Link Training.\n");
 
@@ -534,8 +592,20 @@ err_adjust_lt:
 
 #define CDNS_KEEP_ALIVE_TIMEOUT			2000
 
+#ifdef CONFIG_DRM_CDNS_MHDP_J721E
+static const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
+	.init = cdns_mhdp_j721e_init,
+	.exit = cdns_mhdp_j721e_fini,
+	.enable = cdns_mhdp_j721e_enable,
+	.disable = cdns_mhdp_j721e_disable,
+};
+#endif
+
 static const struct of_device_id mhdp_ids[] = {
 	{ .compatible = "cdns,mhdp8546", },
+#ifdef CONFIG_DRM_CDNS_MHDP_J721E
+	{ .compatible = "ti,j721e-mhdp8546", .data = &mhdp_ti_j721e_ops },
+#endif
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mhdp_ids);
@@ -713,6 +783,53 @@ static int load_firmware(struct cdns_mhdp_device *mhdp)
 	return 0;
 }
 
+static void mhdp_check_link(struct cdns_mhdp_device *mhdp)
+{
+	struct drm_connector *conn = &mhdp->connector;
+	u8 status[DP_LINK_STATUS_SIZE];
+	bool hpd_state;
+	int hpd_event;
+	int ret;
+
+	/* Nothing to check if there is no link */
+	if (!mhdp->link_up)
+		return;
+
+	hpd_event = cdns_mhdp_read_event(mhdp);
+
+	/* Getting event bits failed, bail out */
+	if (hpd_event < 0) {
+		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
+			 __func__, hpd_event);
+		return;
+	}
+
+	hpd_state = !!(hpd_event & DPTX_READ_EVENT_HPD_STATE);
+
+	/* No point checking the link if HPD is down (cable is unplugged) */
+	if (!hpd_state)
+		return;
+
+	/*
+	 * Prevent display reconfiguration between link check and link
+	 * status property setting. We must use the legacy giant lock
+	 * here, since drm_connector_set_link_status_property()'s
+	 * fine-grained DRM locking implementation is broken.
+	 */
+	mutex_lock(&conn->dev->mode_config.mutex);
+
+	/* Check if the link is still up */
+	ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
+
+	if (ret < 0 || /* If dpcd read fails, assume the link is down too */
+	    !drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) ||
+	    !drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
+		/* Link is broken, indicate it with the link status property */
+		drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
+
+	mutex_unlock(&conn->dev->mode_config.mutex);
+}
+
 static irqreturn_t mhdp_irq_handler(int irq, void *data)
 {
 	struct cdns_mhdp_device *mhdp = (struct cdns_mhdp_device *)data;
@@ -738,8 +855,11 @@ static irqreturn_t mhdp_irq_handler(int irq, void *data)
 	bridge_attached = mhdp->bridge_attached;
 	spin_unlock(&mhdp->start_lock);
 
-	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD))
+	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
+		mhdp_check_link(mhdp);
+
 		drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -802,13 +922,15 @@ static int cdns_mhdp_get_modes(struct drm_connector *connector)
 	 * HACK: Warn about unsupported display formats until we deal
 	 *       with them correctly.
 	 */
-	if (!(connector->display_info.color_formats &
+	if (connector->display_info.color_formats &&
+	    !(connector->display_info.color_formats &
 	      mhdp->display_fmt.color_format))
 		dev_warn(mhdp->dev,
 			 "%s: No supported color_format found (0x%08x)\n",
 			__func__, connector->display_info.color_formats);
 
-	if (connector->display_info.bpc < mhdp->display_fmt.bpc)
+	if (connector->display_info.bpc &&
+	    connector->display_info.bpc < mhdp->display_fmt.bpc)
 		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
 			 __func__, connector->display_info.bpc,
 			 mhdp->display_fmt.bpc);
@@ -848,9 +970,46 @@ static int cdns_mhdp_detect(struct drm_connector *conn,
 	return connector_status_disconnected;
 }
 
+static
+bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
+			    const struct drm_display_mode *mode,
+			    int lanes, int rate)
+{
+	u32 max_bw, req_bw, bpp;
+
+	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+	req_bw = mode->clock * bpp / 8;
+
+	max_bw = lanes * rate;
+
+	if (req_bw > max_bw) {
+		dev_dbg(mhdp->dev, "%s: %s (%u * %u/8 =) %u > %u (= %u * %u)\n",
+			__func__, mode->name, mode->clock, bpp, req_bw,
+			max_bw, lanes, rate);
+
+		return false;
+	}
+
+	return true;
+}
+
+static
+enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
+					  struct drm_display_mode *mode)
+{
+	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+
+	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->host.lanes_cnt,
+				    mhdp->host.link_rate))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
 	.detect_ctx = cdns_mhdp_detect,
 	.get_modes = cdns_mhdp_get_modes,
+	.mode_valid = cdns_mhdp_mode_valid,
 };
 
 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
@@ -1062,7 +1221,7 @@ static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
 					  unsigned int training_interval)
 {
 	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
-	u8 dpcd[DP_LINK_STATUS_SIZE];
+	u8 link_status[DP_LINK_STATUS_SIZE];
 	u32 reg32;
 	union phy_configure_opts phy_cfg;
 
@@ -1079,10 +1238,10 @@ static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
 			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
 			   CDNS_DP_TRAINING_PATTERN_4);
 
-	drm_dp_dpcd_read_link_status(&mhdp->aux, dpcd);
+	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
 
 	do {
-		mhdp_get_adjust_train(mhdp, dpcd, lanes_data, &phy_cfg);
+		mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg);
 		phy_cfg.dp.lanes = (mhdp->link.num_lanes);
 		phy_cfg.dp.ssc = false;
 		phy_cfg.dp.set_lanes = false;
@@ -1091,19 +1250,19 @@ static bool mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
 		phy_configure(mhdp->phy,  &phy_cfg);
 
 		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
-				    training_interval, lanes_data, dpcd);
+				    training_interval, lanes_data, link_status);
 
-		if (!drm_dp_clock_recovery_ok(dpcd, mhdp->link.num_lanes))
+		if (!drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes))
 			goto err;
 
-		if (drm_dp_channel_eq_ok(dpcd, mhdp->link.num_lanes)) {
+		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
 			dev_dbg(mhdp->dev, "EQ phase succeeded\n");
 			return true;
 		}
 
 		fail_counter_short++;
 
-		mhdp_adjust_requested_eq(mhdp, dpcd);
+		mhdp_adjust_requested_eq(mhdp, link_status);
 	} while (fail_counter_short < 5);
 
 err:
@@ -1174,11 +1333,11 @@ static void mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
 	}
 }
 
-static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
+static bool mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
 {
 	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
 	fail_counter_short = 0, fail_counter_cr_long = 0;
-	u8 dpcd[DP_LINK_STATUS_SIZE];
+	u8 link_status[DP_LINK_STATUS_SIZE];
 	bool cr_done;
 	union phy_configure_opts phy_cfg;
 
@@ -1186,14 +1345,14 @@ static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
 
 	mhdp_link_training_init(mhdp);
 
-	drm_dp_dpcd_read_link_status(&mhdp->aux, dpcd);
+	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
 
 	do {
-		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {},
-									requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
+		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
+		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
 		bool same_before_adjust, max_swing_reached;
 
-		mhdp_get_adjust_train(mhdp, dpcd, lanes_data, &phy_cfg);
+		mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg);
 		phy_cfg.dp.lanes = (mhdp->link.num_lanes);
 		phy_cfg.dp.ssc = false;
 		phy_cfg.dp.set_lanes = false;
@@ -1202,10 +1361,10 @@ static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
 		phy_configure(mhdp->phy,  &phy_cfg);
 
 		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
-				    lanes_data, dpcd);
+				    lanes_data, link_status);
 
 		mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
-				 &max_swing_reached, lanes_data, dpcd,
+				 &max_swing_reached, lanes_data, link_status,
 				 requested_adjust_volt_swing,
 				 requested_adjust_pre_emphasis);
 
@@ -1232,7 +1391,7 @@ static bool mhdp_link_training_clock_recovery(struct cdns_mhdp_device *mhdp)
 		 * Voltage swing/pre-emphasis adjust requested
 		 * during CR phase
 		 */
-		mhdp_adjust_requested_cr(mhdp, dpcd,
+		mhdp_adjust_requested_cr(mhdp, link_status,
 					 requested_adjust_volt_swing,
 					 requested_adjust_pre_emphasis);
 	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);
@@ -1267,7 +1426,7 @@ static int mhdp_link_training(struct cdns_mhdp_device *mhdp,
 	const u8 eq_tps = eq_training_pattern_supported(mhdp->host, mhdp->sink);
 
 	while (1) {
-		if (!mhdp_link_training_clock_recovery(mhdp)) {
+		if (!mhdp_link_training_cr(mhdp)) {
 			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
 			    DP_LINK_BW_1_62) {
 				dev_dbg(mhdp->dev,
@@ -1398,7 +1557,8 @@ static void cdns_mhdp_disable(struct drm_bridge *bridge)
 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
 			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
 
-	cdns_mhdp_j721e_disable(mhdp);
+	if (mhdp->ops && mhdp->ops->disable)
+		mhdp->ops->disable(mhdp);
 }
 
 static u32 get_training_interval_us(struct cdns_mhdp_device *mhdp,
@@ -1528,6 +1688,14 @@ static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
 
 	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
 
+	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+				    mhdp->link.rate)) {
+		dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
+			__func__, mode->name, mhdp->link.num_lanes,
+			mhdp->link.rate / 100);
+		return -EINVAL;
+	}
+
 	/* find optimal tu_size */
 	required_bandwidth = pxlclock * bpp / 8;
 	available_bandwidth = mhdp->link.num_lanes * rate;
@@ -1544,8 +1712,13 @@ static int cdns_mhdp_sst_enable(struct drm_bridge *bridge)
 	} while ((vs == 1 || ((vs_f > 850 || vs_f < 100) && vs_f != 0) ||
 		  tu_size - vs < 2) && tu_size < 64);
 
-	if (vs > 64)
+	if (vs > 64) {
+		dev_err(mhdp->dev,
+			"%s: No space for framing %s (%u lanes at %u Mbps)\n",
+			__func__, mode->name, mhdp->link.num_lanes,
+			mhdp->link.rate / 100);
 		return -EINVAL;
+	}
 
 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
 			    CDNS_DP_FRAMER_TU_VS(vs) |
@@ -1743,7 +1916,8 @@ void cdns_mhdp_enable(struct drm_bridge *bridge)
 
 	dev_dbg(mhdp->dev, "bridge enable\n");
 
-	cdns_mhdp_j721e_enable(mhdp);
+	if (mhdp->ops && mhdp->ops->enable)
+		mhdp->ops->enable(mhdp);
 
 	/* Enable VIF clock for stream 0 */
 	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
@@ -1781,6 +1955,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
 
 static int mhdp_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct resource *regs;
 	struct cdns_mhdp_device *mhdp;
 	struct clk *clk;
@@ -1802,6 +1977,7 @@ static int mhdp_probe(struct platform_device *pdev)
 
 	mhdp->clk = clk;
 	mhdp->dev = &pdev->dev;
+	mutex_init(&mhdp->mbox_mutex);
 	spin_lock_init(&mhdp->start_lock);
 	dev_set_drvdata(&pdev->dev, mhdp);
 
@@ -1824,19 +2000,26 @@ static int mhdp_probe(struct platform_device *pdev)
 
 	clk_prepare_enable(clk);
 
+	match = of_match_device(mhdp_ids, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+	mhdp->ops = (struct mhdp_platform_ops *)match->data;
+
 	pm_runtime_enable(&pdev->dev);
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "pm_runtime_get_sync failed\n");
 		pm_runtime_disable(&pdev->dev);
-		return ret;
+		goto clk_disable;
 	}
 
-	ret = cdns_mhdp_j721e_init(mhdp);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "J721E Wrapper initialization failed: %d\n",
-			ret);
-		goto runtime_put;
+	if (mhdp->ops && mhdp->ops->init) {
+		ret = mhdp->ops->init(mhdp);
+		if (ret != 0) {
+			dev_err(&pdev->dev, "MHDP platform initialization failed: %d\n",
+				ret);
+			goto runtime_put;
+		}
 	}
 
 	rate = clk_get_rate(clk);
@@ -1852,10 +2035,9 @@ static int mhdp_probe(struct platform_device *pdev)
 	ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, mhdp_irq_handler,
 					IRQF_ONESHOT, "mhdp8546", mhdp);
 	if (ret) {
-		dev_err(&pdev->dev,
-			"cannot install IRQ %d\n", irq);
+		dev_err(&pdev->dev, "cannot install IRQ %d\n", irq);
 		ret = -EIO;
-		goto runtime_put;
+		goto j721e_fini;
 	}
 
 	/* Read source capabilities, based on PHY's device tree properties. */
@@ -1909,9 +2091,13 @@ static int mhdp_probe(struct platform_device *pdev)
 
 phy_exit:
 	phy_exit(mhdp->phy);
+j721e_fini:
+	cdns_mhdp_j721e_fini(mhdp);
 runtime_put:
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+clk_disable:
+	clk_disable_unprepare(mhdp->clk);
 
 	return ret;
 }
@@ -1925,6 +2111,9 @@ static int mhdp_remove(struct platform_device *pdev)
 	bool stop_fw = false;
 	int ret = 0;
 
+	if (mhdp->ops && mhdp->ops->exit)
+		mhdp->ops->exit(mhdp);
+
 	drm_bridge_remove(&mhdp->bridge);
 
 wait_loading:
@@ -1953,6 +2142,8 @@ wait_loading:
 
 	phy_exit(mhdp->phy);
 
+	cdns_mhdp_j721e_fini(mhdp);
+
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
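
As a side note on the new cdns_mhdp_bandwidth_ok() helper above: the check boils down to comparing the mode's payload rate against the aggregate lane rate. Below is a standalone, purely illustrative C sketch of that arithmetic with invented example figures; the unit bookkeeping in the comments follows how the driver prints these numbers and is an interpretation, not something stated in the diff.

/*
 * Illustration only: the same comparison cdns_mhdp_bandwidth_ok() makes,
 * with example values. required = pixel clock (kHz) * bpp / 8,
 * available = lanes * per-lane link rate; both land on the same
 * kilo-bytes-per-second scale, so a plain compare suffices.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool bandwidth_ok(uint32_t pclk_khz, uint32_t bpp,
                         uint32_t lanes, uint32_t rate)
{
        uint32_t req_bw = pclk_khz * bpp / 8;   /* 148500 * 24 / 8 = 445500 */
        uint32_t max_bw = lanes * rate;         /* 4 * 270000 = 1080000 */

        return req_bw <= max_bw;
}

int main(void)
{
        /* 1080p60 (148.5 MHz pixel clock), RGB888, 4 lanes at HBR */
        printf("1080p60, 4xHBR: %s\n",
               bandwidth_ok(148500, 24, 4, 270000) ? "fits" : "rejected");
        /* 2160p60 (594 MHz), RGB888, a single RBR lane clearly cannot */
        printf("2160p60, 1xRBR: %s\n",
               bandwidth_ok(594000, 24, 1, 162000) ? "fits" : "rejected");
        return 0;
}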

+ 14 - 0
drivers/gpu/drm/bridge/cdns-mhdp.h

@@ -224,6 +224,15 @@ enum mhdp_hw_state { MHDP_HW_INACTIVE = 0, /* HW not initialized */
 		     MHDP_HW_READY,	   /* HW ready, FW active*/
 		     MHDP_HW_STOPPED };	   /* Driver removal FW to be stopped */
 
+struct cdns_mhdp_device;
+
+struct mhdp_platform_ops {
+	int (*init)(struct cdns_mhdp_device *mhdp);
+	void (*exit)(struct cdns_mhdp_device *mhdp);
+	void (*enable)(struct cdns_mhdp_device *mhdp);
+	void (*disable)(struct cdns_mhdp_device *mhdp);
+};
+
 struct cdns_mhdp_device {
 	void __iomem *regs;
 	void __iomem *j721e_regs;
@@ -232,6 +241,11 @@ struct cdns_mhdp_device {
 	struct clk *clk;
 	struct phy *phy;
 
+	const struct mhdp_platform_ops *ops;
+
+	/* This is to protect mailbox communications with the firmware */
+	struct mutex mbox_mutex;
+
 	struct drm_connector connector;
 	struct drm_bridge bridge;
 

+ 1 - 1
drivers/gpu/drm/bridge/sii902x.c

@@ -662,7 +662,7 @@ static void sii902x_audio_shutdown(struct device *dev, void *data)
 	}
 }
 
-int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
 	struct sii902x *sii902x = dev_get_drvdata(dev);
 

+ 3 - 2
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c

@@ -553,8 +553,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core,
 	}
 
 	/* Set ACR clock divisor */
-	REG_FLD_MOD(av_base,
-			HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+	if (cfg->use_mclk)
+		REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
+			    cfg->mclk_mode, 2, 0);
 
 	r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
 	/*

+ 1 - 22
drivers/gpu/drm/tidss/tidss_dispc6.c

@@ -574,8 +574,7 @@ static enum drm_mode_status dispc6_vp_mode_valid(struct dispc_device *dispc,
 		return MODE_BAD_HVALUE;
 
 	if (vsw < 1 || vsw > 256 ||
-	    vfp < 0 || vfp > 4095 ||
-	    vbp < 0 || vbp > 4095)
+	    vfp > 4095 || vbp > 4095)
 		return MODE_BAD_VVALUE;
 
 	if (dispc->memory_bandwidth_limit) {
@@ -743,26 +742,6 @@ static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_null = {
 	.c0 = { 512, 512, 512, 512, 512, 512, 512, 512, 256,  },
 };
 
-/* M=8, Upscale x >= 1 */
-static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_m8 = {
-	.c2 = {	0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2,  },
-	.c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20,  },
-	.c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312,  },
-};
-
-/* 5-tap, M=22, Downscale Ratio 2.5 < x < 3 */
-static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_m22_5tap = {
-	.c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14,  },
-	.c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122,  },
-	.c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192,  },
-};
-
-/* 3-tap, M=22, Downscale Ratio 2.5 < x < 3 */
-static const struct dispc6_vid_fir_coefs dispc6_fir_coefs_m22_3tap = {
-	.c1 = { 100, 118, 136, 156, 176, 196, 216, 236, 0, 10, 20, 30, 40, 54, 68, 84,  },
-	.c0 = { 312, 310, 308, 302, 296, 286, 276, 266, 256,  },
-};
-
 enum dispc6_vid_fir_coef_set {
 	DISPC6_VID_FIR_COEF_HORIZ,
 	DISPC6_VID_FIR_COEF_HORIZ_UV,
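
For context on the dispc6_vp_mode_valid() change above: the porch variables are unsigned (the dispc7 copy of the function later in this diff declares them as u32), so the dropped `vfp < 0` / `vbp < 0` tests could never be true, which is presumably what the W=1 build flagged. A tiny standalone illustration:

/*
 * Illustration only: a "< 0" test on an unsigned value is always false,
 * and gcc reports it under -Wextra / -Wtype-limits, so the only useful
 * bound left to check is the upper one.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vfp = 0;

        if (vfp < 0)    /* warning: comparison is always false */
                printf("never reached\n");

        printf("vfp > 4095? %s\n", vfp > 4095 ? "yes" : "no");
        return 0;
}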

+ 143 - 120
drivers/gpu/drm/tidss/tidss_dispc7.c

@@ -33,9 +33,13 @@
 #include "tidss_scale_coefs.h"
 #include "tidss_dispc7.h"
 
+static const char *dispc7_plane_name(struct dispc_device *dispc, u32 hw_plane);
+
 static const struct dispc7_features dispc7_am6_feats = {
-	.min_pclk = 1000,
-	.max_pclk = 200000000,
+	.max_pclk_kHz = {
+		[DISPC7_VP_DPI] = 165000,
+		[DISPC7_VP_OLDI] = 165000,
+	},
 
 	.num_commons = 1,
 	.common_name = { "common" },
@@ -86,8 +90,10 @@ static const struct dispc7_features dispc7_am6_feats = {
 };
 
 static const struct dispc7_features dispc7_j721e_feats = {
-	.min_pclk = 1000,
-	.max_pclk = 600000000,
+	.max_pclk_kHz = {
+		[DISPC7_VP_DPI] = 170000,
+		[DISPC7_VP_INTERNAL] = 600000,
+	},
 
 	.num_commons = 4,
 	.common_name = { "common_m", "common_s0", "common_s1", "common_s2" },
@@ -117,8 +123,9 @@ static const struct dispc7_features dispc7_j721e_feats = {
 	.vp_name = { "vp1", "vp2", "vp3", "vp4" },
 	.ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
 	.vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
-	.vp_bus_type =	{ DISPC7_VP_DPI, DISPC7_VP_DPI,
-			  DISPC7_VP_DPI, DISPC7_VP_DPI, },
+	/* Currently hard coded VP routing (see dispc7_initial_config()) */
+	.vp_bus_type =	{ DISPC7_VP_INTERNAL, DISPC7_VP_DPI,
+			  DISPC7_VP_INTERNAL, DISPC7_VP_DPI, },
 	.vp_feat = { .color = {
 			.has_ctm = true,
 			.gamma_size = 1024,
@@ -707,25 +714,25 @@ static void dispc7_write_irqenable(struct dispc_device *dispc, u64 mask)
 	dispc7_intr_read(dispc, DISPC_IRQENABLE_SET);
 }
 
-enum dispc7_oldi_mode { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+enum dispc7_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
 
 struct dispc7_bus_format {
 	u32 bus_fmt;
 	u32 data_width;
-	enum dispc7_vp_bus_type bus_type;
-	enum dispc7_oldi_mode oldi_mode;
+	bool is_oldi_fmt;
+	enum dispc7_oldi_mode_reg_val oldi_mode_reg_val;
 };
 
 static const struct dispc7_bus_format dispc7_bus_formats[] = {
-	{ MEDIA_BUS_FMT_RGB444_1X12,		12, DISPC7_VP_DPI, 0 },
-	{ MEDIA_BUS_FMT_RGB565_1X16,		16, DISPC7_VP_DPI, 0 },
-	{ MEDIA_BUS_FMT_RGB666_1X18,		18, DISPC7_VP_DPI, 0 },
-	{ MEDIA_BUS_FMT_RGB888_1X24,		24, DISPC7_VP_DPI, 0 },
-	{ MEDIA_BUS_FMT_RGB101010_1X30,		30, DISPC7_VP_DPI, 0 },
-	{ MEDIA_BUS_FMT_RGB121212_1X36,		36, DISPC7_VP_DPI, 0 },
-	{ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,	18, DISPC7_VP_OLDI, SPWG_18 },
-	{ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,	24, DISPC7_VP_OLDI, SPWG_24 },
-	{ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,	24, DISPC7_VP_OLDI, JEIDA_24 },
+	{ MEDIA_BUS_FMT_RGB444_1X12,		12, false, 0 },
+	{ MEDIA_BUS_FMT_RGB565_1X16,		16, false, 0 },
+	{ MEDIA_BUS_FMT_RGB666_1X18,		18, false, 0 },
+	{ MEDIA_BUS_FMT_RGB888_1X24,		24, false, 0 },
+	{ MEDIA_BUS_FMT_RGB101010_1X30,		30, false, 0 },
+	{ MEDIA_BUS_FMT_RGB121212_1X36,		36, false, 0 },
+	{ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,	18, true, SPWG_18 },
+	{ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,	24, true, SPWG_24 },
+	{ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,	24, true, JEIDA_24 },
 };
 
 static const
@@ -808,7 +815,7 @@ static void dispc7_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
 
 	oldi_cfg |= BIT(7); /* DEPOL */
 
-	oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode, 3, 1);
+	oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
 
 	oldi_cfg |= BIT(12); /* SOFTRST */
 
@@ -837,9 +844,6 @@ static void dispc7_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
 	if (WARN_ON(!fmt))
 		return;
 
-	if (WARN_ON(dispc->feat->vp_bus_type[hw_videoport] != fmt->bus_type))
-		return;
-
 	if (dispc->feat->vp_bus_type[hw_videoport] == DISPC7_VP_OLDI) {
 		dispc7_oldi_tx_power(dispc, true);
 
@@ -1002,11 +1006,17 @@ static enum drm_mode_status dispc7_vp_mode_valid(struct dispc_device *dispc,
 						 const struct drm_display_mode *mode)
 {
 	u32 hsw, hfp, hbp, vsw, vfp, vbp;
+	enum dispc7_vp_bus_type bus_type;
+	int max_pclk;
+
+	bus_type = dispc->feat->vp_bus_type[hw_videoport];
 
-	if (mode->clock * 1000 < dispc->feat->min_pclk)
-		return MODE_CLOCK_LOW;
+	max_pclk = dispc->feat->max_pclk_kHz[bus_type];
 
-	if (mode->clock * 1000 > dispc->feat->max_pclk)
+	if (WARN_ON(max_pclk == 0))
+		return MODE_BAD;
+
+	if (mode->clock > max_pclk)
 		return MODE_CLOCK_HIGH;
 
 	if (mode->hdisplay > 4096)
@@ -1019,6 +1029,17 @@ static enum drm_mode_status dispc7_vp_mode_valid(struct dispc_device *dispc,
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		return MODE_NO_INTERLACE;
 
+	/*
+	 * Enforce that the output width is divisible by 2. This is
+	 * actually only needed in the following cases:
+	 * - YUV output selected (BT656, BT1120)
+	 * - Dithering enabled
+	 * - TDM with TDMCycleFormat == 3
+	 * But for simplicity we enforce that always.
+	 */
+	if ((mode->hdisplay % 2) != 0)
+		return MODE_BAD_HVALUE;
+
 	hfp = mode->hsync_start - mode->hdisplay;
 	hsw = mode->hsync_end - mode->hsync_start;
 	hbp = mode->htotal - mode->hsync_end;
@@ -1033,8 +1054,7 @@ static enum drm_mode_status dispc7_vp_mode_valid(struct dispc_device *dispc,
 		return MODE_BAD_HVALUE;
 
 	if (vsw < 1 || vsw > 256 ||
-	    vfp < 0 || vfp > 4095 ||
-	    vbp < 0 || vbp > 4095)
+	    vfp > 4095 || vbp > 4095)
 		return MODE_BAD_VVALUE;
 
 	return MODE_OK;
@@ -1063,10 +1083,10 @@ static int dispc7_vp_check(struct dispc_device *dispc, u32 hw_videoport,
 		return -EINVAL;
 	}
 
-	if (dispc->feat->vp_bus_type[hw_videoport] != fmt->bus_type) {
-		dev_dbg(dispc->dev, "%s: %s is not %s-port\n",
-			__func__, dispc->feat->vp_name[hw_videoport],
-			fmt->bus_type == DISPC7_VP_OLDI ? "OLDI" : "DPI");
+	if (dispc->feat->vp_bus_type[hw_videoport] != DISPC7_VP_OLDI &&
+	    fmt->is_oldi_fmt) {
+		dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+			__func__, dispc->feat->vp_name[hw_videoport]);
 		return -EINVAL;
 	}
 
@@ -1296,7 +1316,7 @@ static void dispc7_wb_write_csc(struct dispc_device *dispc,
 }
 
 /* YUV -> RGB, ITU-R BT.601, full range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt601_full = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt601_full = {
 	dispc7_csc_yuv2rgb_regval,
 	{ 256,   0,  358,	/* ry, rcb, rcr |1.000  0.000  1.402|*/
 	  256, -88, -182,	/* gy, gcb, gcr |1.000 -0.344 -0.714|*/
@@ -1308,7 +1328,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt601_full = {
 };
 
 /* YUV -> RGB, ITU-R BT.601, limited range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt601_lim = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt601_lim = {
 	dispc7_csc_yuv2rgb_regval,
 	{ 298,    0,  409,	/* ry, rcb, rcr |1.164  0.000  1.596|*/
 	  298, -100, -208,	/* gy, gcb, gcr |1.164 -0.392 -0.813|*/
@@ -1320,7 +1340,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt601_lim = {
 };
 
 /* YUV -> RGB, ITU-R BT.709, full range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt709_full = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt709_full = {
 	dispc7_csc_yuv2rgb_regval,
 	{ 256,	  0,  402,	/* ry, rcb, rcr |1.000	0.000  1.570|*/
 	  256,  -48, -120,	/* gy, gcb, gcr |1.000 -0.187 -0.467|*/
@@ -1332,7 +1352,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt709_full = {
 };
 
 /* YUV -> RGB, ITU-R BT.709, limited range */
-const static struct dispc7_csc_coef csc_yuv2rgb_bt709_lim = {
+static const struct dispc7_csc_coef csc_yuv2rgb_bt709_lim = {
 	dispc7_csc_yuv2rgb_regval,
 	{ 298,    0,  459,	/* ry, rcb, rcr |1.164  0.000  1.793|*/
 	  298,  -55, -136,	/* gy, gcb, gcr |1.164 -0.213 -0.533|*/
@@ -1344,7 +1364,7 @@ const static struct dispc7_csc_coef csc_yuv2rgb_bt709_lim = {
 };
 
 /* RGB -> YUV, ITU-R BT.601, full range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt601_full = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt601_full = {
 	dispc7_csc_rgb2yuv_regval,
 	{ 77,  150,  29,	/* yr,   yg,  yb | 0.299  0.587  0.114|*/
 	 -43,  -85, 128,	/* cbr, cbg, cbb |-0.173 -0.339  0.511|*/
@@ -1356,7 +1376,7 @@ const static struct dispc7_csc_coef csc_rgb2yuv_bt601_full = {
 };
 
 /* RGB -> YUV, ITU-R BT.601, limited range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt601_lim = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt601_lim = {
 	dispc7_csc_rgb2yuv_regval,
 	{ 66,  129,  25,	/* yr,   yg,  yb | 0.257  0.504  0.098|*/
 	 -38,  -74, 112,	/* cbr, cbg, cbb |-0.148 -0.291  0.439|*/
@@ -1368,7 +1388,7 @@ const static struct dispc7_csc_coef csc_rgb2yuv_bt601_lim = {
 };
 
 /* RGB -> YUV, ITU-R BT.709, full range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt709_full = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt709_full = {
 	dispc7_csc_rgb2yuv_regval,
 	{ 54,  183,  18,	/* yr,   yg,  yb | 0.1826  0.6142  0.0620|*/
 	 -30, -101, 131,	/* cbr, cbg, cbb |-0.1006 -0.3386  0.4392|*/
@@ -1380,7 +1400,7 @@ const static struct dispc7_csc_coef csc_rgb2yuv_bt709_full = {
 };
 
 /* RGB -> YUV, ITU-R BT.709, limited range */
-const static struct dispc7_csc_coef csc_rgb2yuv_bt709_lim = {
+static const struct dispc7_csc_coef csc_rgb2yuv_bt709_lim = {
 	dispc7_csc_rgb2yuv_regval,
 	{ 47,  157,   16,	/* yr,   yg,  yb | 0.1826  0.6142  0.0620|*/
 	 -26,  -87,  112,	/* cbr, cbg, cbb |-0.1006 -0.3386  0.4392|*/
@@ -1397,7 +1417,7 @@ struct dispc7_csc_entry {
 	const struct dispc7_csc_coef *csc;
 };
 
-const static struct dispc7_csc_entry dispc7_yuv2rgb_table[] = {
+static const struct dispc7_csc_entry dispc7_yuv2rgb_table[] = {
 	{ DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
 	  &csc_yuv2rgb_bt601_full, },
 	{ DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
@@ -1408,7 +1428,7 @@ const static struct dispc7_csc_entry dispc7_yuv2rgb_table[] = {
 	  &csc_yuv2rgb_bt709_lim, },
 };
 
-const static struct dispc7_csc_entry dispc7_rgb2yuv_table[] = {
+static const struct dispc7_csc_entry dispc7_rgb2yuv_table[] = {
 	{ DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
 	  &csc_rgb2yuv_bt601_full, },
 	{ DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
@@ -1449,7 +1469,7 @@ struct dispc7_csc_coef *dispc7_find_csc(enum dispc7_csc_direction direction,
 static void dispc7_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
 				 const struct drm_plane_state *state)
 {
-	const static struct dispc7_csc_coef *coef;
+	static const struct dispc7_csc_coef *coef;
 
 	coef = dispc7_find_csc(DISPC7_YUV2RGB, state->color_encoding,
 			       state->color_range);
@@ -1471,7 +1491,7 @@ static void dispc7_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
 static void dispc7_wb_csc_setup(struct dispc_device *dispc,
 				const struct drm_plane_state *state)
 {
-	const static struct dispc7_csc_coef *coef;
+	static const struct dispc7_csc_coef *coef;
 
 	coef = dispc7_find_csc(DISPC7_RGB2YUV, state->color_encoding,
 			       state->color_range);
@@ -2398,13 +2418,9 @@ static bool dispc7_has_writeback(struct dispc_device *dispc)
 	return dispc->wb_managed;
 }
 
-static u32 dispc7_vid_get_fifo_size(struct dispc_device *dispc,
-				    u32 hw_plane)
+static u32 dispc7_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
 {
-	const u32 unit_size = 16;	/* 128-bits */
-
-	return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0) *
-	       unit_size;
+	return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
 }
 
 static void dispc7_vid_set_mflag_threshold(struct dispc_device *dispc,
@@ -2414,31 +2430,16 @@ static void dispc7_vid_set_mflag_threshold(struct dispc_device *dispc,
 			 FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
 }
 
-static void dispc7_vid_mflag_setup(struct dispc_device *dispc,
-				   u32 hw_plane)
+static void dispc7_vid_set_buf_threshold(struct dispc_device *dispc,
+					 u32 hw_plane, u32 low, u32 high)
 {
-	const u32 unit_size = 16;	/* 128-bits */
-	u32 size = dispc7_vid_get_fifo_size(dispc, hw_plane);
-	u32 low, high;
-
-	/*
-	 * Simulation team suggests below thesholds:
-	 * HT = fifosize * 5 / 8;
-	 * LT = fifosize * 4 / 8;
-	 */
-
-	low = size * 4 / 8 / unit_size;
-	high = size * 5 / 8 / unit_size;
-
-	dispc7_vid_set_mflag_threshold(dispc, hw_plane, low, high);
+	dispc7_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+			 FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
 }
 
 static u32 dispc7_wb_get_fifo_size(struct dispc_device *dispc)
 {
-	const u32 unit_size = 16;	/* 128-bits */
-
-	return WB_REG_GET(dispc, DISPC_VID_BUF_SIZE_STATUS, 15, 0) *
-	       unit_size;
+	return WB_REG_GET(dispc, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
 }
 
 static void dispc7_wb_set_mflag_threshold(struct dispc_device *dispc,
@@ -2448,55 +2449,85 @@ static void dispc7_wb_set_mflag_threshold(struct dispc_device *dispc,
 			FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
 }
 
-static void dispc7_wb_mflag_setup(struct dispc_device *dispc)
+static void dispc7_wb_set_buf_threshold(struct dispc_device *dispc,
+					 u32 low, u32 high)
 {
-	const u32 unit_size = 16;	/* 128-bits */
-	u32 size = dispc7_wb_get_fifo_size(dispc);
-	u32 low, high;
+	dispc7_wb_write(dispc, DISPC_WB_BUF_THRESHOLD,
+			FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
 
-	/*
-	 * Simulation team suggests below thesholds:
-	 * HT = fifosize * 5 / 8;
-	 * LT = fifosize * 4 / 8;
-	 */
+static void dispc7_plane_init(struct dispc_device *dispc)
+{
+	unsigned int hw_plane;
 
-	low = size * 4 / 8 / unit_size;
-	high = size * 5 / 8 / unit_size;
+	dev_dbg(dispc->dev, "%s()\n", __func__);
 
-	dispc7_wb_set_mflag_threshold(dispc, low, high);
-}
+	if (dispc->has_cfg_common) {
+		u32 cba_lo_pri = 1;
+		u32 cba_hi_pri = 0;
 
-static void dispc7_mflag_setup(struct dispc_device *dispc)
-{
-	unsigned int i;
+		CFG_REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+		CFG_REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
 
-	if (!dispc->has_cfg_common)
-		goto no_cfg;
+		/* MFLAG_CTRL = ENABLED */
+		CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+		/* MFLAG_START = MFLAGNORMALSTARTMODE */
+		CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+	}
 
-	/* MFLAG_CTRL = ENABLED */
-	CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
-	/* MFLAG_START = MFLAGNORMALSTARTMODE */
-	CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+	dispc_for_each_managed_plane(dispc, hw_plane) {
+		u32 size = dispc7_vid_get_fifo_size(dispc, hw_plane);
+		u32 thr_low, thr_high;
+		u32 mflag_low, mflag_high;
+		u32 preload;
 
-no_cfg:
-	dispc_for_each_managed_plane(dispc, i)
-		dispc7_vid_mflag_setup(dispc, i);
+		thr_high = size - 1;
+		thr_low = size / 2;
 
-	if (dispc7_has_writeback(dispc))
-		dispc7_wb_mflag_setup(dispc);
-}
+		mflag_high = size * 2 / 3;
+		mflag_low = size / 3;
 
-static void dispc7_plane_init(struct dispc_device *dispc)
-{
-	unsigned int i;
+		preload = thr_low;
 
-	dev_dbg(dispc->dev, "%s()\n", __func__);
+		dev_dbg(dispc->dev,
+			"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+			dispc7_plane_name(dispc, hw_plane),
+			size,
+			thr_high, thr_low,
+			mflag_high, mflag_low,
+			preload);
 
-	/* FIFO underflows when scaling if preload is not high enough */
-	dispc_for_each_managed_plane(dispc, i)
-		if (!dispc->feat->vid_lite[i])
-			VID_REG_FLD_MOD(dispc, i, DISPC_VID_PRELOAD,
-					0x7FF, 11, 0);
+		dispc7_vid_set_buf_threshold(dispc, hw_plane,
+					     thr_low, thr_high);
+		dispc7_vid_set_mflag_threshold(dispc, hw_plane,
+					       mflag_low, mflag_high);
+
+		dispc7_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+		/* Prefetch up to PRELOAD value */
+		VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 19, 19);
+	}
+
+	if (dispc7_has_writeback(dispc)) {
+		u32 size = dispc7_wb_get_fifo_size(dispc);
+		u32 thr_low, thr_high;
+		u32 mflag_low, mflag_high;
+
+		thr_high = size - 1;
+		thr_low = size / 2;
+
+		mflag_high = size * 2 / 3;
+		mflag_low = size / 3;
+
+		dev_dbg(dispc->dev,
+			"wb: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u\n",
+			size,
+			thr_high, thr_low,
+			mflag_high, mflag_low);
+
+		dispc7_wb_set_buf_threshold(dispc, thr_low, thr_high);
+		dispc7_wb_set_mflag_threshold(dispc, mflag_low, mflag_high);
+	}
 }
 
 static void dispc7_vp_init(struct dispc_device *dispc)
@@ -2512,7 +2543,6 @@ static void dispc7_vp_init(struct dispc_device *dispc)
 
 static void dispc7_initial_config(struct dispc_device *dispc)
 {
-	dispc7_mflag_setup(dispc);
 	dispc7_plane_init(dispc);
 	dispc7_vp_init(dispc);
 
@@ -2799,10 +2829,9 @@ no_cfg:
 	return 0;
 }
 
-static int dispc7_wb_find_free_ovr(struct dispc_device *dispc)
+static void dispc7_wb_find_free_ovr(struct dispc_device *dispc)
 {
 	struct tidss_device *tidss = dispc->tidss;
-	struct device *dev = tidss->dev;
 	int i, j;
 	bool found;
 	u32 ovr_id = 0xff;
@@ -2825,18 +2854,10 @@ static int dispc7_wb_find_free_ovr(struct dispc_device *dispc)
 		}
 	}
 
-	if (ovr_id != 0xff) {
+	if (ovr_id != 0xff)
 		dispc->wb_reserved_ovr = ovr_id;
-
-		dev_info(dev, "%s: found ovr %s (%d)\n", __func__,
-			 tidss->dispc_ops->vp_name(tidss->dispc, ovr_id), ovr_id);
-
-		return 0;
-	}
-
-	dispc->wb_managed = false;
-	dev_warn(dev, "%s: No OVR available for WB, disabling WB.\n", __func__);
-	return -1;
+	else
+		dispc->wb_managed = false;
 }
 
 static u32 dispc7_wb_get_reserved_ovr(struct dispc_device *dispc)
@@ -2905,6 +2926,8 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 				conn_type = DRM_MODE_CONNECTOR_DPI;
 				break;
 			default:
+				dev_warn(dev, "%s: Bad vp bus type: %d\n",
+					 __func__, dispc->feat->vp_bus_type[i]);
 				conn_type = DRM_MODE_CONNECTOR_Unknown;
 				break;
 			}
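
To make the new FIFO programming in dispc7_plane_init() concrete, the sketch below reproduces the threshold arithmetic for an invented FIFO size (the real size is read from DISPC_VID_BUF_SIZE_STATUS and, after this change, kept in the register's native units rather than multiplied out to bytes):

/*
 * Illustration only: the buffer/MFLAG thresholds and preload value
 * dispc7_plane_init() now derives, computed here for a made-up size.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t size = 640;                    /* hypothetical FIFO size */
        uint32_t thr_high = size - 1;           /* upper buffer threshold */
        uint32_t thr_low = size / 2;            /* lower buffer threshold */
        uint32_t mflag_high = size * 2 / 3;     /* MFLAG high threshold */
        uint32_t mflag_low = size / 3;          /* MFLAG low threshold */
        uint32_t preload = thr_low;             /* DISPC_VID_PRELOAD value */

        printf("bufsize %u, buf_threshold %u/%u, mflag %u/%u, preload %u\n",
               size, thr_high, thr_low, mflag_high, mflag_low, preload);
        return 0;
}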

+ 5 - 5
drivers/gpu/drm/tidss/tidss_dispc7.h

@@ -27,8 +27,10 @@ struct dispc7_errata {
 };
 
 enum dispc7_vp_bus_type {
-	DISPC7_VP_DPI,
-	DISPC7_VP_OLDI,
+	DISPC7_VP_DPI,		/* DPI output */
+	DISPC7_VP_OLDI,		/* OLDI (LVDS) output */
+	DISPC7_VP_INTERNAL,	/* SoC internal routing */
+	DISPC7_VP_MAX_BUS_TYPE,
 };
 
 enum dispc7_dss_subrevision {
@@ -37,9 +39,7 @@ enum dispc7_dss_subrevision {
 };
 
 struct dispc7_features {
-	/* XXX should these come from the .dts? Min pclk is not feature of DSS IP */
-	unsigned long min_pclk;
-	unsigned long max_pclk;
+	int max_pclk_kHz[DISPC7_VP_MAX_BUS_TYPE];
 
 	u32 num_commons;
 	const char *common_name[DISPC7_MAX_COMMONS];

+ 0 - 3
drivers/gpu/drm/tidss/tidss_plane.c

@@ -248,9 +248,6 @@ struct drm_plane *tidss_plane_reserve_wb(struct drm_device *dev)
 	int i;
 	u32 ovr_id = tidss->dispc_ops->wb_get_reserved_ovr(tidss->dispc);
 
-	dev_dbg(dev->dev, "%s: found ovr %s (%d)\n", __func__,
-		tidss->dispc_ops->vp_name(tidss->dispc, ovr_id), ovr_id);
-
 	for (i = tidss->num_planes - 1; i >= 0; --i) {
 		struct drm_plane *plane = tidss->planes[i];
 		struct tidss_plane *tplane = to_tidss_plane(plane);

+ 0 - 7
drivers/gpu/drm/tidss/tidss_scale_coefs.c

@@ -121,13 +121,6 @@ static const struct tidss_scale_coefs coef3_M8 = {
 	.c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
 };
 
-/* Nearest neigbor coefficients for testing */
-static const struct tidss_scale_coefs coefs_null = {
-	.c2 = { 0 },
-	.c1 = { 0 },
-	.c0 = { 512, 512, 512, 512, 512, 512, 512, 512, 256,  },
-};
-
 const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
 						      u32 firinc,
 						      bool five_taps)

+ 1 - 2
drivers/gpu/drm/tidss/tidss_wb.c

@@ -147,12 +147,11 @@ int tidss_wb_init(struct drm_device *drmdev)
 	ret = tidss_wbm2m_init(wdev);
 	if (ret) {
 		log_err(wdev, "Failed to initialize wb m2m\n");
+		return ret;
 	}
 
 	log_dbg(wdev, "WB loaded\n");
 	return 0;
-
-	return ret;
 }
 
 void tidss_wb_cleanup(struct drm_device *drmdev)

+ 1 - 1
drivers/media/i2c/ov5640.c

@@ -2588,7 +2588,6 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
 		v4l2_ctrl_new_std(hdl, ops,
 				  V4L2_CID_PIXEL_RATE, 0, INT_MAX, 1,
 				  55969920);
-	ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
 
 	/* Auto/manual white balance */
 	ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
@@ -2637,6 +2636,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
 		goto free_ctrls;
 	}
 
+	ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
 	ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
 	ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
 

+ 109 - 151
drivers/phy/cadence/phy-cadence-dp.c

@@ -6,6 +6,7 @@
  *
  */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -19,13 +20,14 @@
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
-#define REF_CLK_19_2MHz
+#define REF_CLK_19_2MHz		19200000
+#define REF_CLK_25MHz		25000000
 
 #define DEFAULT_NUM_LANES	4
 #define MAX_NUM_LANES		4
 #define DEFAULT_MAX_BIT_RATE	8100 /* in Mbps */
 
-#define POLL_TIMEOUT_US		2000
+#define POLL_TIMEOUT_US		5000
 #define LANE_MASK		0x7
 
 /*
@@ -45,6 +47,7 @@
 #define PHY_POWER_STATE_LN_1	0x0008
 #define PHY_POWER_STATE_LN_2	0x0010
 #define PHY_POWER_STATE_LN_3	0x0018
+#define PMA_XCVR_POWER_STATE_REQ_LN_MASK	0x3FU
 #define PHY_PMA_XCVR_POWER_STATE_ACK	0x30
 #define PHY_PMA_CMN_READY		0x34
 #define PHY_PMA_XCVR_TX_VMARGIN		0x38
@@ -165,19 +168,26 @@ struct cdns_dp_phy {
 	u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
 	struct reset_control *phy_rst;
 	struct device *dev;
+	struct clk *clk;
+	unsigned long ref_clk_rate;
+};
+
+enum phy_powerstate {
+	POWERSTATE_A0 = 0,
+	// Powerstate A1 is unused
+	POWERSTATE_A2 = 2,
+	POWERSTATE_A3 = 3,
 };
 
 static int cdns_dp_phy_init(struct phy *phy);
-static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
+static int cdns_dp_phy_exit(struct phy *phy);
+static int cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
 static int cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy);
 static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy);
-#ifdef REF_CLK_19_2MHz
 static void cdns_dp_phy_pma_cmn_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy);
 static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy, u32 rate, bool ssc);
-#else
 static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
 static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy, u32 rate, bool ssc);
-#endif
 static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
 				     unsigned int lane);
 static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy,
@@ -188,12 +198,17 @@ static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
 				    unsigned char num_bits,
 				    unsigned int val);
 static int cdns_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts);
+static void cdns_dp_phy_set_a0_pll(struct cdns_dp_phy *cdns_phy, u32 num_lanes);
+static int cdns_dp_phy_set_power_state(struct cdns_dp_phy *cdns_phy,
+				       u32 num_lanes,
+				       enum phy_powerstate powerstate);
 
 static int cdns_dp_phy_on(struct phy *gphy);
 static int cdns_dp_phy_off(struct phy *gphy);
 
 static const struct phy_ops cdns_dp_phy_ops = {
 	.init		= cdns_dp_phy_init,
+	.exit		= cdns_dp_phy_exit,
 	.configure	= cdns_dp_phy_configure,
 	.power_on	= cdns_dp_phy_on,
 	.power_off	= cdns_dp_phy_off,
@@ -310,13 +325,6 @@ static const struct coefficients voltage_coeffs[4][4] = {
 	}
 };
 
-enum phy_powerstate {
-	POWERSTATE_A0 = 0,
-	// Powerstate A1 is unused
-	POWERSTATE_A2 = 2,
-	POWERSTATE_A3 = 3,
-};
-
 static int cdns_dp_phy_init(struct phy *phy)
 {
 	unsigned char lane_bits;
@@ -324,6 +332,29 @@ static int cdns_dp_phy_init(struct phy *phy)
 
 	struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
 
+	r = clk_prepare_enable(cdns_phy->clk);
+	if (r) {
+		dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+		return r;
+	}
+
+	cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
+	if (!(cdns_phy->ref_clk_rate)) {
+		dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+		clk_disable_unprepare(cdns_phy->clk);
+		return -EINVAL;
+	}
+
+	switch (cdns_phy->ref_clk_rate) {
+	case REF_CLK_19_2MHz:
+	case REF_CLK_25MHz:
+		/* Valid Ref Clock Rate */
+		break;
+	default:
+		dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+		return -EINVAL;
+	}
+
 	cdns_dp_phy_write_dp(cdns_phy, PHY_AUX_CTRL, 0x0003); /* enable AUX */
 
 	/* PHY PMA registers configuration function */
@@ -333,40 +364,7 @@ static int cdns_dp_phy_init(struct phy *phy)
 	 * Set lines power state to A0
 	 * Set lines pll clk enable to 0
 	 */
-
-	cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ,
-				PHY_POWER_STATE_LN_0, 6, 0x0000);
-
-	if (cdns_phy->num_lanes >= 2) {
-		cdns_dp_phy_write_field(cdns_phy,
-					PHY_PMA_XCVR_POWER_STATE_REQ,
-					PHY_POWER_STATE_LN_1, 6, 0x0000);
-
-		if (cdns_phy->num_lanes == 4) {
-			cdns_dp_phy_write_field(cdns_phy,
-						PHY_PMA_XCVR_POWER_STATE_REQ,
-						PHY_POWER_STATE_LN_2, 6, 0);
-			cdns_dp_phy_write_field(cdns_phy,
-						PHY_PMA_XCVR_POWER_STATE_REQ,
-						PHY_POWER_STATE_LN_3, 6, 0);
-		}
-	}
-
-	cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
-				0, 1, 0x0000);
-
-	if (cdns_phy->num_lanes >= 2) {
-		cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
-					1, 1, 0x0000);
-		if (cdns_phy->num_lanes == 4) {
-			cdns_dp_phy_write_field(cdns_phy,
-						PHY_PMA_XCVR_PLLCLK_EN,
-						2, 1, 0x0000);
-			cdns_dp_phy_write_field(cdns_phy,
-						PHY_PMA_XCVR_PLLCLK_EN,
-						3, 1, 0x0000);
-		}
-	}
+	cdns_dp_phy_set_a0_pll(cdns_phy, cdns_phy->num_lanes);
 
 	/*
 	 * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
@@ -380,11 +378,10 @@ static int cdns_dp_phy_init(struct phy *phy)
 
 	/* PHY PMA registers configuration functions */
 	/* Initialize PHY with max supported link rate, without SSC. */
-#ifdef REF_CLK_19_2MHz
-	cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(cdns_phy, cdns_phy->max_bit_rate, false);
-#else
-	cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy, cdns_phy->max_bit_rate, false);
-#endif
+	if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz)
+		cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(cdns_phy, cdns_phy->max_bit_rate, false);
+	else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+		cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy, cdns_phy->max_bit_rate, false);
 	cdns_dp_phy_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate, cdns_phy->num_lanes);
 
 	/* take out of reset */
@@ -394,8 +391,16 @@ static int cdns_dp_phy_init(struct phy *phy)
 	if (r)
 		return r;
 
-	cdns_dp_phy_run(cdns_phy);
+	r = cdns_dp_phy_run(cdns_phy);
+
+	return r;
+}
+
+static int cdns_dp_phy_exit(struct phy *phy)
+{
+	struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
 
+	clk_disable_unprepare(cdns_phy->clk);
 	return 0;
 }
 
@@ -405,7 +410,7 @@ static int cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
 	int ret;
 
 	ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_CMN_READY, reg,
-					    reg & 1, 0, 5000);
+					    reg & 1, 0, POLL_TIMEOUT_US);
 	if (ret == -ETIMEDOUT) {
 		dev_err(cdns_phy->dev,
 			"timeout waiting for PMA common ready\n");
@@ -419,21 +424,18 @@ static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy)
 {
 	unsigned int i;
 
-#ifdef REF_CLK_19_2MHz
-	/* PMA common configuration 19.2MHz */
-	cdns_dp_phy_pma_cmn_cfg_19_2mhz(cdns_phy);
-#else
-	/* PMA common configuration 25MHz */
-	cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
-#endif
+	if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz)
+		/* PMA common configuration 19.2MHz */
+		cdns_dp_phy_pma_cmn_cfg_19_2mhz(cdns_phy);
+	else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+		/* PMA common configuration 25MHz */
+		cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
 
 	/* PMA lane configuration to deal with multi-link operation */
 	for (i = 0; i < cdns_phy->num_lanes; i++)
 		cdns_dp_phy_pma_lane_cfg(cdns_phy, i);
 }
 
-#ifdef REF_CLK_19_2MHz
-
 static void cdns_dp_phy_pma_cmn_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy)
 {
 	/* refclock registers - assumes 19.2 MHz refclock */
@@ -591,7 +593,6 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(struct cdns_dp_phy *cdns_phy,
 	cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_LOCK_PLLCNT_START, 0x0099);
 }
 
-#else
 
 static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
 {
@@ -742,7 +743,6 @@ static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy,
 	cdns_dp_phy_write_phy(cdns_phy, CMN_PLL1_LOCK_PLLCNT_START, 0x00C7);
 }
 
-#endif
 
 static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy,
 				     u32 rate, u32 lanes)
@@ -793,11 +793,10 @@ static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
 				     unsigned int lane)
 {
 	/* Per lane, refclock-dependent receiver detection setting */
-#ifdef REF_CLK_19_2MHz
-	cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x0780);
-#else
-	cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x09C4);
-#endif
+	if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz)
+		cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x0780);
+	else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz)
+		cdns_dp_phy_write_phy(cdns_phy, TX_RCVDET_ST_TMR(lane), 0x09C4);
 
 	/* Writing Tx/Rx Power State Controllers registers */
 	cdns_dp_phy_write_phy(cdns_phy, TX_PSC_A0(lane), 0x00FB);
@@ -818,12 +817,9 @@ static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
 	cdns_dp_phy_write_phy(cdns_phy, XCVR_DIAG_HSCLK_SEL(lane), 0x0000);
 }
 
-static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
+static int cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
 {
 	unsigned int read_val;
-	u32 write_val1 = 0;
-	u32 write_val2 = 0;
-	u32 mask = 0;
 	int ret;
 
 	/*
@@ -832,54 +828,21 @@ static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
 	 */
 	ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN_ACK,
 					    read_val, read_val & 1, 0, POLL_TIMEOUT_US);
-	if (ret == -ETIMEDOUT)
+	if (ret == -ETIMEDOUT) {
 		dev_err(cdns_phy->dev,
 			"timeout waiting for link PLL clock enable ack\n");
-
-	ndelay(100);
-
-	switch (cdns_phy->num_lanes) {
-
-	case 1:	/* lane 0 */
-		write_val1 = 0x00000004;
-		write_val2 = 0x00000001;
-		mask = 0x0000003f;
-		break;
-	case 2: /* lane 0-1 */
-		write_val1 = 0x00000404;
-		write_val2 = 0x00000101;
-		mask = 0x00003f3f;
-		break;
-	case 4: /* lane 0-3 */
-		write_val1 = 0x04040404;
-		write_val2 = 0x01010101;
-		mask = 0x3f3f3f3f;
-		break;
+		return ret;
 	}
 
-	cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, write_val1);
-
-	ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_POWER_STATE_ACK,
-					    read_val, (read_val & mask) == write_val1, 0,
-					    POLL_TIMEOUT_US);
-	if (ret == -ETIMEDOUT)
-		dev_err(cdns_phy->dev,
-			"timeout waiting for link power state ack\n");
-
-	cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, 0);
 	ndelay(100);
 
-	cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, write_val2);
+	ret = cdns_dp_phy_set_power_state(cdns_phy, cdns_phy->num_lanes, POWERSTATE_A2);
+	if (ret)
+		return ret;
 
-	ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_POWER_STATE_ACK,
-					    read_val, (read_val & mask) == write_val2, 0,
-					    POLL_TIMEOUT_US);
-	if (ret == -ETIMEDOUT)
-		dev_err(cdns_phy->dev,
-			"timeout waiting for link power state ack\n");
+	ret = cdns_dp_phy_set_power_state(cdns_phy, cdns_phy->num_lanes, POWERSTATE_A0);
 
-	cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ, 0);
-	ndelay(100);
+	return ret;
 }
 
 static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
@@ -983,6 +946,12 @@ static int cdns_dp_phy_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	cdns_phy->clk = devm_clk_get(dev, "refclk");
+	if (IS_ERR(cdns_phy->clk)) {
+		dev_err(dev, "phy ref clock not found\n");
+		return PTR_ERR(cdns_phy->clk);
+	}
+
 	phy_set_drvdata(phy, cdns_phy);
 
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
@@ -996,12 +965,11 @@ static int cdns_dp_phy_probe(struct platform_device *pdev)
 }
 
 static int cdns_dp_phy_set_power_state(struct cdns_dp_phy *cdns_phy,
-				       struct phy_configure_opts_dp *dp,
+				       u32 num_lanes,
 				       enum phy_powerstate powerstate)
 {
 	/* Register value for power state for a single byte. */
 	u32 value_part;
-
 	u32 value;
 	u32 mask;
 	u32 read_val;
@@ -1021,7 +989,7 @@ static int cdns_dp_phy_set_power_state(struct cdns_dp_phy *cdns_phy,
 	}
 
 	/* Select values of registers and mask, depending on enabled lane count. */
-	switch (dp->lanes) {
+	switch (num_lanes) {
 	// lane 0
 	case (1):
 		value = value_part;
@@ -1130,15 +1098,15 @@ static int cdns_dp_phy_configure_rate(struct cdns_dp_phy *cdns_phy,
 	ndelay(200);
 
 	/* DP Rate Change - VCO Output settings. */
-#ifdef REF_CLK_19_2MHz
-	/* PMA common configuration 19.2MHz */
-	cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
-	cdns_dp_phy_pma_cmn_cfg_19_2mhz(cdns_phy);
-#else
-	/* PMA common configuration 25MHz */
-	cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
-	cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
-#endif
+	if (cdns_phy->ref_clk_rate ==  REF_CLK_19_2MHz) {
+		/* PMA common configuration 19.2MHz */
+		cdns_dp_phy_pma_cmn_vco_cfg_19_2mhz(cdns_phy, dp->link_rate, dp->ssc);
+		cdns_dp_phy_pma_cmn_cfg_19_2mhz(cdns_phy);
+	} else if (cdns_phy->ref_clk_rate == REF_CLK_25MHz) {
+		/* PMA common configuration 25MHz */
+		cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy, dp->link_rate, dp->ssc);
+		cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
+	}
 	cdns_dp_phy_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
 
 	/* Enable the cmn_pll0_en. */
@@ -1215,23 +1183,25 @@ static int cdns_dp_phy_verify_config(struct cdns_dp_phy *cdns_phy,
 
 /* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
 static void cdns_dp_phy_set_a0_pll(struct cdns_dp_phy *cdns_phy,
-				   struct phy_configure_opts_dp *dp)
+				   u32 num_lanes)
 {
 	u32 pwr_state = cdns_dp_phy_read_dp(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ);
 	u32 pll_clk_en = cdns_dp_phy_read_dp(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN);
 
 	/* Lane 0 is always enabled. */
-	pwr_state &= ~0x1FU;
+	pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_0);
 	pll_clk_en &= ~0x01U;
 
-	if (dp->lanes > 1) {
-		pwr_state &= ~(0x1FU << 8);
+	if (num_lanes > 1) {
+		/* lane 1 */
+		pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_1);
 		pll_clk_en &= ~(0x01U << 1);
 	}
 
-	if (dp->lanes > 2) {
-		pwr_state &= ~(0x1FU << 16);
-		pwr_state &= ~(0x1FU << 24);
+	if (num_lanes > 2) {
+		/* lanes 2 and 3 */
+		pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_2);
+		pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN_3);
 		pll_clk_en &= ~(0x01U << 2);
 		pll_clk_en &= ~(0x01U << 3);
 	}
@@ -1265,15 +1235,14 @@ static int cdns_dp_phy_set_lanes(struct cdns_dp_phy *cdns_phy,
 	value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
 	cdns_dp_phy_write_dp(cdns_phy, PHY_RESET, value);
 
-	cdns_dp_phy_set_a0_pll(cdns_phy, dp);
+	cdns_dp_phy_set_a0_pll(cdns_phy, dp->lanes);
 
 	/* release phy_l0*_reset_n based on used laneCount */
 	value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
 	cdns_dp_phy_write_dp(cdns_phy, PHY_RESET, value);
 
 	/* Wait, until PHY gets ready after releasing PHY reset signal. */
-	ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_CMN_READY, value,
-					    (value & 0x01) != 0, 0, POLL_TIMEOUT_US);
+	ret = cdns_dp_phy_wait_pma_cmn_ready(cdns_phy);
 	if (ret)
 		return ret;
 
@@ -1282,18 +1251,7 @@ static int cdns_dp_phy_set_lanes(struct cdns_dp_phy *cdns_phy,
 	/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
 	cdns_dp_phy_write_dp(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
 
-	/* waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the master lane */
-	ret = cdns_phy_read_dp_poll_timeout(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN_ACK,
-					    value, (value & 0x01) != 0, 0, POLL_TIMEOUT_US);
-	if (ret)
-		return ret;
-
-	ndelay(100);
-
-	ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A2);
-	if (ret)
-		return ret;
-	ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A0);
+	ret = cdns_dp_phy_run(cdns_phy);
 
 	return ret;
 }
@@ -1304,7 +1262,7 @@ static int cdns_dp_phy_set_rate(struct cdns_dp_phy *cdns_phy,
 {
 	u32 ret;
 
-	ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A3);
+	ret = cdns_dp_phy_set_power_state(cdns_phy, dp->lanes, POWERSTATE_A3);
 	if (ret)
 		return ret;
 	ret = cdns_dp_phy_set_pll_en(cdns_phy, dp, false);
@@ -1320,10 +1278,10 @@ static int cdns_dp_phy_set_rate(struct cdns_dp_phy *cdns_phy,
 	ret = cdns_dp_phy_set_pll_en(cdns_phy, dp, true);
 	if (ret)
 		return ret;
-	ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A2);
+	ret = cdns_dp_phy_set_power_state(cdns_phy, dp->lanes, POWERSTATE_A2);
 	if (ret)
 		return ret;
-	ret = cdns_dp_phy_set_power_state(cdns_phy, dp, POWERSTATE_A0);
+	ret = cdns_dp_phy_set_power_state(cdns_phy, dp->lanes, POWERSTATE_A0);
 	if (ret)
 		return ret;
 	ndelay(900);