
Merge branch 'audio_display-ti-linux-4.19.y' of git.ti.com:~jyrisarha/ti-linux-kernel/jyrisarhas-audio-video-linux-feature-tree into ti-linux-4.19.y

TI-Feature: audio-display
TI-Tree: git@git.ti.com:~jyrisarha/ti-linux-kernel/jyrisarhas-audio-video-linux-feature-tree.git
TI-Branch: audio_display-ti-linux-4.19.y

* 'audio_display-ti-linux-4.19.y' of git.ti.com:~jyrisarha/ti-linux-kernel/jyrisarhas-audio-video-linux-feature-tree:
  ti_config_fragments: add config for display sharing
  arm64: dts: ti: add display sharing overlay
  dt-bindings: ti,j7-dss: New bindings for J721E display-sharing
  drm/tidss: do not run pm ops for virtual crtcs
  drm/tidss: add support for remote resources
  drm/tidss: add support for rpmsg-kdrv display
  rpmsg-kdrv: Add support for device virtualization
  drm/tidss: static partition of vp and planes
  drm/tidss: static partitionning of common areas
  drm/tidss: separate functions for common area
  drm/tidss: add get_irq() to dispc_ops

Signed-off-by: LCPD Auto Merger <lcpd_integration@list.ti.com>
LCPD Auto Merger committed 6 years ago
commit 37ba326000

+ 104 - 0
Documentation/devicetree/bindings/display/ti/ti,j7-dss.txt

@@ -11,5 +11,109 @@ Required properties:
 - interrupts: phandle to DISPC interrupts
 - interrupt-names: "common_m", "common_s0", "common_s1", and "common_s2"
 
+Optional properties:
+- dss-planes: partitioning information for pipes, if display sharing is required
+- dss-vps: partitioning information for video-ports, if display sharing is required
+- dss-commons: partitioning information for common areas, if display sharing is required
+- dss-remote: remote name that is expected to be advertised by the display sharing firmware
+
 The DSS outputs are described using the device graphs as documented in
 Documentation/devicetree/bindings/graph.txt.
+
+Display Sharing on Texas Instruments J721E
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+Partitioning the pipes
+-----------------------
+
+On J721E, multiple video-ports, pipes, and common regions can be driven by separate
+compute entities and therefore should not be accessed by the tidss driver. To ensure this,
+the optional properties dss-planes, dss-vps, and dss-commons should be used as required
+by the setup.
+
+Pipe partitioning is described by the 'dss-planes' node. Each pipe is represented by a 'plane'
+node inside the 'dss-planes' node.
+
+Each 'plane' node must have a 'reg' property to identify the pipe, and a 'managed' property to
+indicate whether the pipe is managed by the driver. The 'managed' property must be 0 if the
+pipe is not to be accessed by the driver.
+
+Since the children of the 'dss-planes' node use 'reg' properties to identify pipes, it must have
+'#address-cells' and '#size-cells' properties, set to 1 and 0 respectively.
+
+dss-planes {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	/* vid1, managed by tidss */
+	plane@0 {
+		reg = <0>;
+		managed = <1>;
+	};
+
+	/* vidl1, not managed by tidss */
+	plane@1 {
+		reg = <1>;
+		managed = <0>;
+	};
+
+	...
+};
+
+Partitioning the video-ports
+-----------------------------
+
+Video-port partitioning is described by the 'dss-vps' node. Each video-port is represented by a 'vp'
+node inside the 'dss-vps' node.
+
+Each 'vp' node must have a 'reg' property to identify the video-port, and a 'managed' property to
+indicate whether the video-port is managed by the driver. The 'managed' property must be 0 if the
+video-port is not to be accessed by the driver.
+
+Since the children of the 'dss-vps' node use 'reg' properties to identify video-ports, it must have
+'#address-cells' and '#size-cells' properties, set to 1 and 0 respectively.
+
+dss-vps {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	/* VP1, managed by tidss */
+	vp@0 {
+		reg = <0>;
+		managed = <1>;
+	};
+
+	/* VP2, not managed by tidss */
+	vp@1 {
+		reg = <1>;
+		managed = <0>;
+	};
+
+	...
+};
+
+Partitioning the common regions
+--------------------------------
+
+The tidss driver can use at most two common regions:
+- a mandatory interrupt region
+- an optional config region
+
+The common region partitioning is described by the 'dss-commons' node. It must have a child node called
+'interrupt-common' and may contain an optional node called 'config-common'.
+
+The 'interrupt-common' node must contain a 'reg' property to indicate which common region is to be used
+for interrupt handling.
+
+The 'config-common' node must contain a 'reg' property to indicate which common region is to be used for
+device configuration. It may contain a 'status' property, which can be set to "disabled" when configuration
+is not to be done.
+
+Since the children of the 'dss-commons' node use 'reg' properties to identify common regions, it must have
+'#address-cells' and '#size-cells' properties, set to 1 and 0 respectively.
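+
+An illustrative example, matching the display-sharing overlay added by this series: interrupts
+are handled through common_s0 (index 1) and the configuration common is present but disabled.
+
+dss-commons {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	/* use common_s0 for interrupt handling */
+	interrupt-common {
+		reg = <1>;
+	};
+
+	/* configuration is done by another entity */
+	config-common {
+		status = "disabled";
+		reg = <0>;
+	};
+};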
+
+Display sharing
+----------------
+
+To enable display sharing with another compute entity, there must be a 'dss-remote' node. The node must have a
+string property called 'remote-name', which indicates the remote-device name to look for and attach to.
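+
+For example, matching the overlay added by this series, which attaches to a remote device
+named "r5f-tidss":
+
+dss-remote {
+	remote-name = "r5f-tidss";
+};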

+ 2 - 1
arch/arm64/boot/dts/ti/Makefile

@@ -21,7 +21,8 @@ dtb-$(CONFIG_ARCH_K3_AM6_SOC) += k3-am654-base-board.dtb \
 	k3-am654-base-board-jailhouse.dtbo
 
 dtb-$(CONFIG_ARCH_K3_J721E_SOC) += k3-j721e-common-proc-board.dtb \
-				   k3-j721e-common-proc-board-infotainment.dtbo
+				   k3-j721e-common-proc-board-infotainment.dtbo \
+				   k3-j721e-common-proc-board-infotainment-display-sharing.dtbo
 
 $(obj)/%.dtbo: $(src)/%.dtso FORCE
 	$(call if_changed_dep,dtc)

+ 114 - 0
arch/arm64/boot/dts/ti/k3-j721e-common-proc-board-infotainment-display-sharing.dtso

@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for J721E ASTC VLAB Model with DSS partitioning support
+ *
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+/dts-v1/;
+/plugin/;
+
+/* Used by RTOS to power-up TFP410 via expander */
+&main_i2c1 {
+	status = "disabled";
+};
+
+&dss {
+	pinctrl-names = "none"; /* pinmux configured by RTOS */
+	power-domains = <&k3_pds 152 TI_SCI_PD_SHARED>; /* share IP among VMs and RTOS */
+
+	/* No changes to parents or rates for VP clocks
+	 * if the VP is not owned by us
+	 */
+	assigned-clocks = <&k3_clks 152 1>,
+		<&k3_clks 152 9>,
+		<&k3_clks 152 13>;
+
+	assigned-clock-parents = <&k3_clks 152 2>,	/* PLL16_HSDIV0 */
+		<&k3_clks 152 11>,			/* PLL18_HSDIV0 */
+		<&k3_clks 152 18>;			/* PLL23_HSDIV0 */
+
+	/* partition information */
+	dss_planes: dss-planes {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* vid1, marshalled to us by RTOS */
+		plane@0 {
+			reg = <0>;
+			managed = <0>;
+		};
+
+		/* vidl1, Reserved for jailhouse inmate */
+		plane@1 {
+			reg = <1>;
+			managed = <0>;
+		};
+
+		/* vid2, owned by RTOS */
+		plane@2 {
+			reg = <2>;
+			managed = <0>;
+		};
+
+		/* vidl2, marshalled to us by RTOS */
+		plane@3 {
+			reg = <3>;
+			managed = <0>;
+		};
+	};
+
+	dss_vps: dss-vps {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* Owned by jailhouse inmate */
+		vp@0 {
+			reg = <0>;
+			managed = <0>;
+		};
+
+		/* Owned by RTOS */
+		vp@1 {
+			reg = <1>;
+			managed = <0>;
+		};
+
+		/* The 2 below are not owned by anyone
+		 * else, so keeping here
+		 */
+		vp@2 {
+			reg = <2>;
+			managed = <1>;
+		};
+
+		vp@3 {
+			reg = <3>;
+			managed = <1>;
+		};
+	};
+
+	dss_commons: dss-commons {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-common {
+			reg = <1>;
+		};
+
+		config-common {
+			status = "disabled";
+			reg = <0>;
+		};
+	};
+
+	dss_remote: dss-remote {
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		remote-name = "r5f-tidss";
+	};
+};
+

+ 2 - 0
drivers/Kconfig

@@ -155,6 +155,8 @@ source "drivers/remoteproc/Kconfig"
 
 source "drivers/rpmsg/Kconfig"
 
+source "drivers/rpmsg-kdrv/Kconfig"
+
 source "drivers/soundwire/Kconfig"
 
 source "drivers/soc/Kconfig"

+ 1 - 0
drivers/Makefile

@@ -155,6 +155,7 @@ obj-$(CONFIG_MAILBOX)		+= mailbox/
 obj-$(CONFIG_HWSPINLOCK)	+= hwspinlock/
 obj-$(CONFIG_REMOTEPROC)	+= remoteproc/
 obj-$(CONFIG_RPMSG)		+= rpmsg/
+obj-$(CONFIG_RPMSG_KDRV)	+= rpmsg-kdrv/
 obj-$(CONFIG_SOUNDWIRE)		+= soundwire/
 
 # Virtualization drivers

+ 5 - 1
drivers/gpu/drm/tidss/Makefile

@@ -6,7 +6,11 @@ tidss-y := tidss_crtc.o \
 	tidss_irq.o \
 	tidss_kms.o \
 	tidss_plane.o \
-	tidss_scale_coefs.o
+	tidss_scale_coefs.o \
+	tidss_v_connector.o \
+	tidss_v_encoder.o \
+	tidss_v_plane.o \
+	tidss_v_crtc.o
 
 tidss-$(CONFIG_DRM_TIDSS_DSS6) += tidss_dispc6.o
 tidss-$(CONFIG_DRM_TIDSS_DSS7) += tidss_dispc7.o

+ 2 - 0
drivers/gpu/drm/tidss/tidss_dispc.h

@@ -134,6 +134,8 @@ struct tidss_dispc_ops {
 	void (*remove)(struct dispc_device *dispc);
 
 	int (*modeset_init)(struct dispc_device *dispc);
+
+	int (*get_irq)(struct dispc_device *dispc);
 };
 
 int dispc6_init(struct tidss_device *tidss);

+ 7 - 0
drivers/gpu/drm/tidss/tidss_dispc6.c

@@ -1368,6 +1368,11 @@ static int dispc6_modeset_init(struct dispc_device *dispc)
 	return 0;
 }
 
+static int dispc6_get_irq(struct dispc_device *dispc)
+{
+	return platform_get_irq(to_platform_device(dispc->tidss->dev), 0);
+}
+
 static void dispc6_remove(struct dispc_device *dispc);
 
 static const struct tidss_dispc_ops dispc6_ops = {
@@ -1405,6 +1410,8 @@ static const struct tidss_dispc_ops dispc6_ops = {
 	.remove = dispc6_remove,
 
 	.modeset_init = dispc6_modeset_init,
+
+	.get_irq = dispc6_get_irq,
 };
 
 static int dispc6_iomap_resource(struct platform_device *pdev, const char *name,

+ 435 - 65
drivers/gpu/drm/tidss/tidss_dispc7.c

@@ -37,6 +37,10 @@ static const struct dispc7_features dispc7_am6_feats = {
 	.min_pclk = 1000,
 	.max_pclk = 200000000,
 
+	.num_commons = 1,
+	.common_name = { "common" },
+	.common_cfg = { true },
+
 	.scaling = {
 		.in_width_max_5tap_rgb = 1280,
 		.in_width_max_3tap_rgb = 2560,
@@ -85,6 +89,10 @@ static const struct dispc7_features dispc7_j721e_feats = {
 	.min_pclk = 1000,
 	.max_pclk = 600000000,
 
+	.num_commons = 4,
+	.common_name = { "common_m", "common_s0", "common_s1", "common_s2" },
+	.common_cfg = { true, false, false, false },
+
 	/* XXX: Scaling features are copied from AM6 and should be checked */
 	.scaling = {
 		.in_width_max_5tap_rgb = 1280,
@@ -141,11 +149,11 @@ static const struct of_device_id dispc7_of_table[] = {
 #define FLD_MOD(orig, val, start, end) \
 	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
 
-#define REG_GET(dispc, idx, start, end) \
-	FLD_GET(dispc7_read(dispc, idx), start, end)
+#define CFG_REG_GET(dispc, idx, start, end) \
+	FLD_GET(dispc7_cfg_read(dispc, idx), start, end)
 
-#define REG_FLD_MOD(dispc, idx, val, start, end) \
-	dispc7_write(dispc, idx, FLD_MOD(dispc7_read(dispc, idx), val, start, end))
+#define CFG_REG_FLD_MOD(dispc, idx, val, start, end) \
+	dispc7_cfg_write(dispc, idx, FLD_MOD(dispc7_cfg_read(dispc, idx), val, start, end))
 
 #define VID_REG_GET(dispc, hw_plane, idx, start, end) \
 	FLD_GET(dispc7_vid_read(dispc, hw_plane, idx), start, end)
@@ -247,17 +255,25 @@ struct dispc_device {
 	struct tidss_device *tidss;
 	struct device *dev;
 
-	void __iomem *base_common;
+	void __iomem *base_common_cfg;
+	void __iomem *base_common_intr;
 	void __iomem *base_vid[DISPC7_MAX_PLANES];
 	void __iomem *base_ovr[DISPC7_MAX_PORTS];
 	void __iomem *base_vp[DISPC7_MAX_PORTS];
 
+	int irq;
+
+	bool has_cfg_common;
+
 	struct regmap *syscon;
 
 	struct clk *vp_clk[DISPC7_MAX_PORTS];
 
 	const struct dispc7_features *feat;
 
+	bool vp_managed[DISPC7_MAX_PORTS];
+	bool plane_managed[DISPC7_MAX_PLANES];
+
 	struct clk *fclk;
 
 	bool is_enabled;
@@ -267,15 +283,45 @@ struct dispc_device {
 	struct dss_plane_data plane_data[DISPC7_MAX_PLANES];
 };
 
+#define dispc_for_each_managed_vp(dispc, hw_videoport) \
+	for ((hw_videoport) = 0; (hw_videoport) < (dispc)->feat->num_vps; (hw_videoport)++) \
+		if ((dispc)->vp_managed[(hw_videoport)])
+
+#define dispc_for_each_managed_plane(dispc, hw_plane) \
+	for ((hw_plane) = 0; (hw_plane) < (dispc)->feat->num_planes; (hw_plane)++) \
+		if ((dispc)->plane_managed[(hw_plane)])
 
-static void dispc7_write(struct dispc_device *dispc, u16 reg, u32 val)
+static u32 dispc7_get_next_managed_plane(struct dispc_device *dispc,
+					 u32 *plane_idx)
 {
-	iowrite32(val, dispc->base_common + reg);
+	while (*plane_idx < dispc->feat->num_planes) {
+		u32 plane_id = dispc->feat->vid_order[(*plane_idx)++];
+
+		if (dispc->plane_managed[plane_id])
+			return plane_id;
+	}
+
+	return dispc->feat->num_planes;
 }
 
-static u32 dispc7_read(struct dispc_device *dispc, u16 reg)
+static void dispc7_intr_write(struct dispc_device *dispc, u16 reg, u32 val)
 {
-	return ioread32(dispc->base_common + reg);
+	iowrite32(val, dispc->base_common_intr + reg);
+}
+
+static u32 dispc7_intr_read(struct dispc_device *dispc, u16 reg)
+{
+	return ioread32(dispc->base_common_intr + reg);
+}
+
+static void dispc7_cfg_write(struct dispc_device *dispc, u16 reg, u32 val)
+{
+	iowrite32(val, dispc->base_common_cfg + reg);
+}
+
+static u32 dispc7_cfg_read(struct dispc_device *dispc, u16 reg)
+{
+	return ioread32(dispc->base_common_cfg + reg);
 }
 
 static void dispc7_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val)
@@ -397,7 +443,7 @@ static u32 dispc7_vid_irq_to_raw(u64 vidstat, u32 hw_plane)
 static u64 dispc7_vp_read_irqstatus(struct dispc_device *dispc,
 				    u32 hw_videoport)
 {
-	u32 stat = dispc7_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
+	u32 stat = dispc7_intr_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
 
 	return dispc7_vp_irq_from_raw(stat, hw_videoport);
 }
@@ -407,13 +453,13 @@ static void dispc7_vp_write_irqstatus(struct dispc_device *dispc,
 {
 	u32 stat = dispc7_vp_irq_to_raw(vpstat, hw_videoport);
 
-	dispc7_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
+	dispc7_intr_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
 }
 
 static u64 dispc7_vid_read_irqstatus(struct dispc_device *dispc,
 				     u32 hw_plane)
 {
-	u32 stat = dispc7_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+	u32 stat = dispc7_intr_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
 
 	return dispc7_vid_irq_from_raw(stat, hw_plane);
 }
@@ -423,13 +469,13 @@ static void dispc7_vid_write_irqstatus(struct dispc_device *dispc,
 {
 	u32 stat = dispc7_vid_irq_to_raw(vidstat, hw_plane);
 
-	dispc7_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+	dispc7_intr_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
 }
 
 static u64 dispc7_vp_read_irqenable(struct dispc_device *dispc,
 				    u32 hw_videoport)
 {
-	u32 stat = dispc7_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
+	u32 stat = dispc7_intr_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
 
 	return dispc7_vp_irq_from_raw(stat, hw_videoport);
 }
@@ -439,14 +485,14 @@ static void dispc7_vp_write_irqenable(struct dispc_device *dispc,
 {
 	u32 stat = dispc7_vp_irq_to_raw(vpstat, hw_videoport);
 
-	dispc7_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
+	dispc7_intr_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
 }
 
 
 static u64 dispc7_vid_read_irqenable(struct dispc_device *dispc,
 				     u32 hw_plane)
 {
-	u32 stat = dispc7_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+	u32 stat = dispc7_intr_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
 
 	return dispc7_vid_irq_from_raw(stat, hw_plane);
 }
@@ -456,7 +502,7 @@ static void dispc7_vid_write_irqenable(struct dispc_device *dispc,
 {
 	u32 stat = dispc7_vid_irq_to_raw(vidstat, hw_plane);
 
-	dispc7_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+	dispc7_intr_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
 }
 
 static void dispc7_clear_irqstatus(struct dispc_device *dispc, u64 clearmask)
@@ -464,22 +510,22 @@ static void dispc7_clear_irqstatus(struct dispc_device *dispc, u64 clearmask)
 	unsigned int i;
 	u32 top_clear = 0;
 
-	for (i = 0; i < dispc->feat->num_vps; ++i) {
+	dispc_for_each_managed_vp(dispc, i) {
 		if (clearmask & DSS_IRQ_VP_MASK(i)) {
 			dispc7_vp_write_irqstatus(dispc, i, clearmask);
 			top_clear |= BIT(i);
 		}
 	}
-	for (i = 0; i < dispc->feat->num_planes; ++i) {
+	dispc_for_each_managed_plane(dispc, i) {
 		if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
 			dispc7_vid_write_irqstatus(dispc, i, clearmask);
 			top_clear |= BIT(4 + i);
 		}
 	}
-	dispc7_write(dispc, DISPC_IRQSTATUS, top_clear);
+	dispc7_intr_write(dispc, DISPC_IRQSTATUS, top_clear);
 
 	/* Flush posted writes */
-	dispc7_read(dispc, DISPC_IRQSTATUS);
+	dispc7_intr_read(dispc, DISPC_IRQSTATUS);
 }
 
 static u64 dispc7_read_and_clear_irqstatus(struct dispc_device *dispc)
@@ -487,10 +533,10 @@ static u64 dispc7_read_and_clear_irqstatus(struct dispc_device *dispc)
 	u64 status = 0;
 	unsigned int i;
 
-	for (i = 0; i < dispc->feat->num_vps; ++i)
+	dispc_for_each_managed_vp(dispc, i)
 		status |= dispc7_vp_read_irqstatus(dispc, i);
 
-	for (i = 0; i < dispc->feat->num_planes; ++i)
+	dispc_for_each_managed_plane(dispc, i)
 		status |= dispc7_vid_read_irqstatus(dispc, i);
 
 	dispc7_clear_irqstatus(dispc, status);
@@ -503,10 +549,10 @@ static u64 dispc7_read_irqenable(struct dispc_device *dispc)
 	u64 enable = 0;
 	unsigned int i;
 
-	for (i = 0; i < dispc->feat->num_vps; ++i)
+	dispc_for_each_managed_vp(dispc, i)
 		enable |= dispc7_vp_read_irqenable(dispc, i);
 
-	for (i = 0; i < dispc->feat->num_planes; ++i)
+	dispc_for_each_managed_plane(dispc, i)
 		enable |= dispc7_vid_read_irqenable(dispc, i);
 
 	return enable;
@@ -523,7 +569,7 @@ static void dispc7_write_irqenable(struct dispc_device *dispc, u64 mask)
 	/* clear the irqstatus for newly enabled irqs */
 	dispc7_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
 
-	for (i = 0; i < dispc->feat->num_vps; ++i) {
+	dispc_for_each_managed_vp(dispc, i) {
 		dispc7_vp_write_irqenable(dispc, i, mask);
 		if (mask & DSS_IRQ_VP_MASK(i))
 			main_enable |= BIT(i);		/* VP IRQ */
@@ -531,7 +577,7 @@ static void dispc7_write_irqenable(struct dispc_device *dispc, u64 mask)
 			main_disable |= BIT(i);		/* VP IRQ */
 	}
 
-	for (i = 0; i < dispc->feat->num_planes; ++i) {
+	dispc_for_each_managed_plane(dispc, i) {
 		dispc7_vid_write_irqenable(dispc, i, mask);
 		if (mask & DSS_IRQ_PLANE_MASK(i))
 			main_enable |= BIT(i + 4);	/* VID IRQ */
@@ -540,13 +586,13 @@ static void dispc7_write_irqenable(struct dispc_device *dispc, u64 mask)
 	}
 
 	if (main_enable)
-		dispc7_write(dispc, DISPC_IRQENABLE_SET, main_enable);
+		dispc7_intr_write(dispc, DISPC_IRQENABLE_SET, main_enable);
 
 	if (main_disable)
-		dispc7_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+		dispc7_intr_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
 
 	/* Flush posted writes */
-	dispc7_read(dispc, DISPC_IRQENABLE_SET);
+	dispc7_intr_read(dispc, DISPC_IRQENABLE_SET);
 }
 
 enum dispc7_oldi_mode { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
@@ -658,11 +704,11 @@ static void dispc7_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
 
 	dispc7_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
 
-	while (!(oldi_reset_bit & dispc7_read(dispc, DSS_SYSSTATUS)) &&
+	while (!(oldi_reset_bit & dispc7_cfg_read(dispc, DSS_SYSSTATUS)) &&
 	       count < 10000)
 		count++;
 
-	if (!(oldi_reset_bit & dispc7_read(dispc, DSS_SYSSTATUS)))
+	if (!(oldi_reset_bit & dispc7_cfg_read(dispc, DSS_SYSSTATUS)))
 		dev_warn(dispc->dev, "%s: timeout waiting OLDI reset done\n",
 			 __func__);
 }
@@ -685,7 +731,8 @@ static void dispc7_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
 	if (dispc->feat->vp_bus_type[hw_videoport] == DISPC7_VP_OLDI) {
 		dispc7_oldi_tx_power(dispc, true);
 
-		dispc7_enable_oldi(dispc, hw_videoport, fmt);
+		if (dispc->has_cfg_common)
+			dispc7_enable_oldi(dispc, hw_videoport, fmt);
 	}
 }
 
@@ -1788,12 +1835,16 @@ static void dispc7_mflag_setup(struct dispc_device *dispc)
 {
 	unsigned int i;
 
+	if (!dispc->has_cfg_common)
+		goto no_cfg;
+
 	/* MFLAG_CTRL = ENABLED */
-	REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+	CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
 	/* MFLAG_START = MFLAGNORMALSTARTMODE */
-	REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+	CFG_REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
 
-	for (i = 0; i < dispc->feat->num_planes; i++)
+no_cfg:
+	dispc_for_each_managed_plane(dispc, i)
 		dispc7_vid_mflag_setup(dispc, i);
 }
 
@@ -1804,7 +1855,7 @@ static void dispc7_plane_init(struct dispc_device *dispc)
 	dev_dbg(dispc->dev, "%s()\n", __func__);
 
 	/* FIFO underflows when scaling if preload is not high enough */
-	for (i = 0; i < dispc->feat->num_planes; i++)
+	dispc_for_each_managed_plane(dispc, i)
 		if (!dispc->feat->vid_lite[i])
 			VID_REG_FLD_MOD(dispc, i, DISPC_VID_PRELOAD,
 					0x7FF, 11, 0);
@@ -1817,7 +1868,7 @@ static void dispc7_vp_init(struct dispc_device *dispc)
 	dev_dbg(dispc->dev, "%s()\n", __func__);
 
 	/* Enable the gamma Shadow bit-field for all VPs*/
-	for (i = 0; i < dispc->feat->num_vps; i++)
+	dispc_for_each_managed_vp(dispc, i)
 		VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
 }
 
@@ -1828,8 +1879,8 @@ static void dispc7_initial_config(struct dispc_device *dispc)
 	dispc7_vp_init(dispc);
 
 	/* Note: Harcdoded DPI routing on J721E for now */
-	if (dispc->feat->subrev == DSS7_J721E) {
-		dispc7_write(dispc, DISPC_CONNECTIONS,
+	if (dispc->feat->subrev == DSS7_J721E && dispc->has_cfg_common) {
+		dispc7_cfg_write(dispc, DISPC_CONNECTIONS,
 			     FLD_VAL(2, 3, 0) |		/* VP1 to DPI0 */
 			     FLD_VAL(8, 7, 4)		/* VP3 to DPI1 */
 			    );
@@ -1887,7 +1938,7 @@ static void dispc7_restore_gamma_tables(struct dispc_device *dispc)
 
 	dev_dbg(dispc->dev, "%s()\n", __func__);
 
-	for (i = 0; i < dispc->feat->num_vps; i++)
+	dispc_for_each_managed_vp(dispc, i)
 		dispc7_vp_write_gamma_table(dispc, i);
 }
 
@@ -2039,7 +2090,7 @@ static int dispc7_init_gamma_tables(struct dispc_device *dispc)
 
 	dev_dbg(dispc->dev, "%s()\n", __func__);
 
-	for (i = 0; i < dispc->feat->num_vps; i++)
+	dispc_for_each_managed_vp(dispc, i)
 		dispc7_vp_set_gamma(dispc, i, NULL, 0);
 
 	return 0;
@@ -2078,25 +2129,29 @@ static int dispc7_runtime_resume(struct dispc_device *dispc)
 
 	clk_prepare_enable(dispc->fclk);
 
-	if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+	if (!dispc->has_cfg_common)
+		goto no_cfg;
+
+	if (CFG_REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
 		dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
 
 	dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
-		dispc7_read(dispc, DSS_REVISION));
+		dispc7_cfg_read(dispc, DSS_REVISION));
 
 	dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
-		REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
-		REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
-		REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
 
 	dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
-		REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
-		REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
-		REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
 
 	dev_dbg(dispc->dev, "DISPC IDLE %d\n",
-		REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+		CFG_REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
 
+no_cfg:
 	dispc7_initial_config(dispc);
 
 	dispc7_restore_gamma_tables(dispc);
@@ -2119,11 +2174,9 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 		u32 enc_type;
 	};
 
-	u32 max_vps = dispc->feat->num_vps;
-	u32 max_planes = dispc->feat->num_planes;
-
 	struct pipe pipes[DISPC7_MAX_PORTS];
 	u32 num_pipes = 0;
+	u32 plane_idx = 0;
 	u32 crtc_mask;
 
 	num_fourccs = 0;
@@ -2136,8 +2189,9 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 	}
 
 	/* first find all the connected panels & bridges */
+	/* exclude the VPs that are not managed.         */
 
-	for (i = 0; i < max_vps; i++) {
+	dispc_for_each_managed_vp(dispc, i) {
 		struct drm_panel *panel;
 		struct drm_bridge *bridge;
 		u32 enc_type = DRM_MODE_ENCODER_NONE;
@@ -2195,9 +2249,15 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 		struct tidss_plane *tplane;
 		struct tidss_crtc *tcrtc;
 		struct drm_encoder *enc;
-		u32 hw_plane_id = dispc->feat->vid_order[tidss->num_planes];
+		u32 hw_plane_id;
 		int ret;
 
+		hw_plane_id = dispc7_get_next_managed_plane(dispc, &plane_idx);
+		if (hw_plane_id == dispc->feat->num_planes) {
+			dev_err(tidss->dev, "no managed HW plane found for CRTC\n");
+			return -EINVAL;
+		}
+
 		tplane = tidss_plane_create(tidss, hw_plane_id,
 					    DRM_PLANE_TYPE_PRIMARY, crtc_mask,
 					    fourccs, num_fourccs);
@@ -2206,6 +2266,8 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 			return PTR_ERR(tplane);
 		}
 
+		hw_plane_id++;
+
 		tidss->planes[tidss->num_planes++] = &tplane->plane;
 
 		tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
@@ -2233,9 +2295,13 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 
 	/* create overlay planes of the leftover planes */
 
-	while (tidss->num_planes < max_planes) {
+	while (tidss->num_planes < dispc->feat->num_planes) {
 		struct tidss_plane *tplane;
-		u32 hw_plane_id = dispc->feat->vid_order[tidss->num_planes];
+		u32 hw_plane_id;
+
+		hw_plane_id = dispc7_get_next_managed_plane(dispc, &plane_idx);
+		if (hw_plane_id == dispc->feat->num_planes)
+			break;
 
 		tplane = tidss_plane_create(tidss, hw_plane_id,
 					    DRM_PLANE_TYPE_OVERLAY, crtc_mask,
@@ -2246,12 +2312,19 @@ static int dispc7_modeset_init(struct dispc_device *dispc)
 			return PTR_ERR(tplane);
 		}
 
+		hw_plane_id++;
+
 		tidss->planes[tidss->num_planes++] = &tplane->plane;
 	}
 
 	return 0;
 }
 
+static int dispc7_get_irq(struct dispc_device *dispc)
+{
+	return dispc->irq;
+}
+
 static void dispc7_remove(struct dispc_device *dispc)
 {
 	struct device *dev = dispc->dev;
@@ -2299,6 +2372,8 @@ static const struct tidss_dispc_ops dispc7_ops = {
 	.remove = dispc7_remove,
 
 	.modeset_init = dispc7_modeset_init,
+
+	.get_irq = dispc7_get_irq,
 };
 
 static int dispc7_iomap_resource(struct platform_device *pdev, const char *name,
@@ -2324,13 +2399,302 @@ static int dispc7_iomap_resource(struct platform_device *pdev, const char *name,
 	return 0;
 }
 
+static int dispc_j721e_get_managed_common_intr(struct dispc_device *dispc,
+		u32 *intr)
+{
+	int ret;
+	struct tidss_device *tidss = dispc->tidss;
+	struct device *dev = tidss->dev;
+	struct device_node *dss_commons_node;
+	struct device_node *intr_node;
+	u32 value;
+
+	dss_commons_node = of_get_child_by_name(dev->of_node, "dss-commons");
+	if (!dss_commons_node) {
+		*intr = 0;
+		return 0;
+	}
+
+	intr_node = of_get_child_by_name(dss_commons_node, "interrupt-common");
+	if (!intr_node) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ret = of_property_read_u32(intr_node, "reg", &value);
+	if (ret)
+		goto out2;
+
+	*intr = value;
+
+out2:
+	of_node_put(intr_node);
+out:
+	of_node_put(dss_commons_node);
+	return ret;
+}
+
+static int dispc_j721e_get_managed_common_cfg(struct dispc_device *dispc,
+		u32 *cfg)
+{
+	int ret;
+	struct tidss_device *tidss = dispc->tidss;
+	struct device *dev = tidss->dev;
+	struct device_node *dss_commons_node;
+	struct device_node *cfg_node;
+	u32 value;
+
+	dss_commons_node = of_get_child_by_name(dev->of_node, "dss-commons");
+	if (!dss_commons_node) {
+		*cfg = 0;
+		return 0;
+	}
+
+	cfg_node = of_get_child_by_name(dss_commons_node, "config-common");
+	if (!cfg_node) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (!of_device_is_available(cfg_node)) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ret = of_property_read_u32(cfg_node, "reg", &value);
+	if (ret)
+		goto out2;
+
+	*cfg = value;
+
+out2:
+	of_node_put(cfg_node);
+out:
+	of_node_put(dss_commons_node);
+	return ret;
+}
+
+/*
+ * The logic for J721E is simple:
+ * 1. Must find an interrupt common, the driver cannot work
+ *    without one.
+ *
+ *    If the dss device-tree node does not have a subnode
+ *    "dss-commons", assume tidss is the only module handling
+ *    DSS and therefore use intr_common = COMMON_M.
+ *
+ *    If "dss-commons" subnode is present, then it must have
+ *    a child node "interrupt-common", or else fail. And then,
+ *    interrupt-common must have a "reg" property that
+ *    indicates which common area to use for interrupts. Must
+ *    be in the range 0 to feat->num_commons - 1.
+ *
+ * 2. Optionally, find a configuration region, or make certain
+ *    assumptions and proceed
+ *
+ *    If the dss has a remote device, assume tidss is a slave
+ *    and do not search for a config_common
+ *
+ *    If the dss device-tree node does not have a
+ *    subnode called "dss-commons", assume tidss is the only
+ *    module handling DSS and therefore use config_common =
+ *    COMMON_M.
+ *
+ *    If "dss-commons" is present, search for a child
+ *    "config-common". If no such child is present, tidss
+ *    assumes that DSS is early-configured and does not
+ *    search for a config-common.
+ *
+ *    If "config-common" child is present, then it must have a
+ *    "reg" property that indicates which common area to use for
+ *    configuration. Must be COMMON_M
+ */
+static int dispc7_j721e_setup_commons(struct dispc_device *dispc)
+{
+	int r;
+	struct tidss_device *tidss = dispc->tidss;
+	struct device *dev = tidss->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	u32 common_intr_id, common_cfg_id;
+
+	r = dispc_j721e_get_managed_common_intr(dispc, &common_intr_id);
+	if (r || common_intr_id >= dispc->feat->num_commons)
+		return -EINVAL;
+
+	r = dispc7_iomap_resource(pdev, dispc->feat->common_name[common_intr_id],
+			&dispc->base_common_intr);
+	if (r)
+		return r;
+
+	dispc->irq = platform_get_irq(pdev, common_intr_id);
+	if (dispc->irq < 0)
+		return dispc->irq;
+
+	if (tidss->rdev) {
+		dev_dbg(dev, "%s: continuing with remote device\n", __func__);
+		dispc->has_cfg_common = false;
+		return 0;
+	}
+
+	r = dispc_j721e_get_managed_common_cfg(dispc, &common_cfg_id);
+	if (r) {
+		dev_dbg(dev, "%s: continuing without configuration common\n", __func__);
+		dispc->has_cfg_common = false;
+		return 0;
+	}
+
+	if (common_cfg_id >= dispc->feat->num_commons ||
+			!dispc->feat->common_cfg[common_cfg_id])
+		return -EINVAL;
+
+	if (common_intr_id == common_cfg_id)
+		dispc->base_common_cfg = dispc->base_common_intr;
+	else {
+		r = dispc7_iomap_resource(pdev, dispc->feat->common_name[common_cfg_id],
+				&dispc->base_common_cfg);
+		if (r)
+			return r;
+	}
+
+	dispc->has_cfg_common = true;
+
+	return 0;
+}
+
+static int dispc7_am6_setup_commons(struct dispc_device *dispc)
+{
+	int r;
+	struct tidss_device *tidss = dispc->tidss;
+	struct platform_device *pdev = to_platform_device(tidss->dev);
+
+	r = dispc7_iomap_resource(pdev, "common", &dispc->base_common_cfg);
+	if (r)
+		return r;
+
+	dispc->base_common_intr = dispc->base_common_cfg;
+
+	dispc->irq = platform_get_irq(pdev, 0);
+	if (dispc->irq < 0)
+		return dispc->irq;
+
+	dispc->has_cfg_common = true;
+
+	return 0;
+}
+
+static int dispc7_setup_commons(struct dispc_device *dispc)
+{
+	switch (dispc->feat->subrev) {
+	case DSS7_AM6:
+		return dispc7_am6_setup_commons(dispc);
+	case DSS7_J721E:
+		return dispc7_j721e_setup_commons(dispc);
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+}
+
+static struct device_node *dispc7_of_dss_plane_for_id(struct device_node *parent, u32 id)
+{
+	struct device_node *dss_planes_node, *plane;
+
+	dss_planes_node = of_get_child_by_name(parent, "dss-planes");
+	if (!dss_planes_node)
+		return NULL;
+
+	for_each_child_of_node(dss_planes_node, plane) {
+		u32 plane_id = 0;
+
+		if (of_node_cmp(plane->name, "plane") != 0)
+			continue;
+		of_property_read_u32(plane, "reg", &plane_id);
+		if (id == plane_id)
+			break;
+	}
+
+	of_node_put(dss_planes_node);
+
+	return plane;
+}
+
+static struct device_node *dispc7_of_dss_vp_for_id(struct device_node *parent, u32 id)
+{
+	struct device_node *dss_vps_node, *vp;
+
+	dss_vps_node = of_get_child_by_name(parent, "dss-vps");
+	if (!dss_vps_node)
+		return NULL;
+
+	for_each_child_of_node(dss_vps_node, vp) {
+		u32 vp_id = 0;
+
+		if (of_node_cmp(vp->name, "vp") != 0)
+			continue;
+		of_property_read_u32(vp, "reg", &vp_id);
+		if (id == vp_id)
+			break;
+	}
+
+	of_node_put(dss_vps_node);
+
+	return vp;
+}
+
+static bool dispc7_is_plane_managed(struct tidss_device *tidss, u32 plane_id)
+{
+	struct device *dev = tidss->dev;
+	struct device_node *plane;
+	u32 managed;
+	bool ret;
+
+	plane = dispc7_of_dss_plane_for_id(dev->of_node, plane_id);
+	if (!plane)
+		return true;
+
+	ret = true;
+
+	if (of_property_read_u32(plane, "managed", &managed))
+		goto out;
+
+	if (!managed)
+		ret = false;
+
+out:
+	of_node_put(plane);
+	return ret;
+}
+
+static bool dispc7_is_vp_managed(struct tidss_device *tidss, u32 vp_id)
+{
+	struct device *dev = tidss->dev;
+	struct device_node *vp;
+	u32 managed;
+	bool ret;
+
+	vp = dispc7_of_dss_vp_for_id(dev->of_node, vp_id);
+	if (!vp)
+		return true;
+
+	ret = true;
+
+	if (of_property_read_u32(vp, "managed", &managed))
+		goto out;
+
+	if (!managed)
+		ret = false;
+
+out:
+	of_node_put(vp);
+	return ret;
+}
+
 int dispc7_init(struct tidss_device *tidss)
 {
 	struct device *dev = tidss->dev;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dispc_device *dispc;
 	const struct dispc7_features *feat;
-	const char *common_name;
 	unsigned int i;
 	int r = 0;
 
@@ -2353,22 +2717,28 @@ int dispc7_init(struct tidss_device *tidss)
 	switch (feat->subrev) {
 	case DSS7_AM6:
 		dispc7_common_regmap = tidss_am6_common_regs;
-		common_name = "common";
 		break;
 	case DSS7_J721E:
 		dispc7_common_regmap = tidss_j721e_common_regs;
-		common_name = "common_m";
 		break;
 	default:
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
-	r = dispc7_iomap_resource(pdev, common_name, &dispc->base_common);
-	if (r)
+	r = dispc7_setup_commons(dispc);
+	if (r) {
+		dev_err(dev, "%s: could not setup common regions\n", __func__);
 		return r;
+	}
+
+	for (i = 0; i < dispc->feat->num_vps; i++)
+		dispc->vp_managed[i] = dispc7_is_vp_managed(tidss, i);
+
+	for (i = 0; i < dispc->feat->num_planes; i++)
+		dispc->plane_managed[i] = dispc7_is_plane_managed(tidss, i);
 
-	for (i = 0; i < dispc->feat->num_planes; i++) {
+	dispc_for_each_managed_plane(dispc, i) {
 		r = dispc7_iomap_resource(pdev, dispc->feat->vid_name[i],
 					  &dispc->base_vid[i]);
 		dev_dbg(dev, "%s: %u %s %d\n", __func__,
@@ -2377,7 +2747,7 @@ int dispc7_init(struct tidss_device *tidss)
 			return r;
 	}
 
-	for (i = 0; i < dispc->feat->num_vps; i++) {
+	dispc_for_each_managed_vp(dispc, i) {
 		struct clk *clk;
 
 		r = dispc7_iomap_resource(pdev, dispc->feat->ovr_name[i],

+ 5 - 0
drivers/gpu/drm/tidss/tidss_dispc7.h

@@ -7,6 +7,7 @@
 #ifndef __TIDSS_DISPC7_H
 #define __TIDSS_DISPC7_H
 
+#define DISPC7_MAX_COMMONS	4
 #define DISPC7_MAX_PORTS	4
 #define DISPC7_MAX_PLANES	4
 
@@ -40,6 +41,10 @@ struct dispc7_features {
 	unsigned long min_pclk;
 	unsigned long max_pclk;
 
+	u32 num_commons;
+	const char *common_name[DISPC7_MAX_COMMONS];
+	bool common_cfg[DISPC7_MAX_COMMONS];
+
 	struct dispc7_features_scaling scaling;
 
 	enum dispc7_dss_subrevision subrev;

+ 72 - 4
drivers/gpu/drm/tidss/tidss_drv.c

@@ -117,6 +117,64 @@ static const struct dev_pm_ops tidss_pm_ops = {
  * Platform driver
  */
 
+static int tidss_init_remote_device(struct tidss_device *tidss)
+{
+	int ret;
+	struct device *dev = tidss->dev;
+	struct device_node *dss_remote_dev_node;
+	const char *name;
+
+	dss_remote_dev_node = of_get_child_by_name(dev->of_node, "dss-remote");
+	if (!dss_remote_dev_node)
+		return 0;
+
+	if (!of_find_property(dss_remote_dev_node, "remote-name", NULL)) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = of_property_read_string(dss_remote_dev_node, "remote-name", &name);
+	if (ret) {
+		dev_err(dev, "%s: could not read remote-name property\n", __func__);
+		goto out;
+	}
+
+	tidss->rdev = rpmsg_remotedev_get_named_device(name);
+	if (!tidss->rdev) {
+		ret = -EPROBE_DEFER;
+		goto out;
+	} else if (IS_ERR(tidss->rdev)) {
+		ret = PTR_ERR(tidss->rdev);
+		goto out;
+	}
+
+	tidss->rdev->cb_data = tidss;
+
+	if (tidss->rdev->device.display.ops->ready == NULL ||
+			tidss->rdev->device.display.ops->get_res_info == NULL ||
+			tidss->rdev->device.display.ops->commit == NULL) {
+		dev_err(dev, "%s: rpmsg remotedev ops not complete\n", __func__);
+		ret = -EINVAL;
+		goto disconnect;
+	}
+
+	/* Can't really do much if the remotedev is not ready yet */
+	if (!tidss->rdev->device.display.ops->ready(tidss->rdev)) {
+		ret = -EPROBE_DEFER;
+		goto disconnect;
+	}
+
+	goto out;
+
+disconnect:
+	rpmsg_remotedev_put_device(tidss->rdev);
+	tidss->rdev->cb_data = NULL;
+	tidss->rdev = NULL;
+out:
+	of_node_put(dss_remote_dev_node);
+	return ret;
+}
+
 static int tidss_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -134,11 +192,17 @@ static int tidss_probe(struct platform_device *pdev)
 	tidss->dev = dev;
 	tidss->features = of_device_get_match_data(dev);
 
+	ret = tidss_init_remote_device(tidss);
+	if (ret)
+		return ret;
+
 	platform_set_drvdata(pdev, tidss);
 
 	ddev = drm_dev_alloc(&tidss_driver, dev);
-	if (IS_ERR(ddev))
-		return PTR_ERR(ddev);
+	if (IS_ERR(ddev)) {
+		ret = PTR_ERR(ddev);
+		goto err_fini_rdev;
+	}
 
 	tidss->ddev = ddev;
 	ddev->dev_private = tidss;
@@ -163,10 +227,10 @@ static int tidss_probe(struct platform_device *pdev)
 		goto err_runtime_suspend;
 	}
 
-	irq = platform_get_irq(pdev, 0);
+	irq = tidss->dispc_ops->get_irq(tidss->dispc);
 	if (irq < 0) {
 		ret = irq;
-		dev_err(dev, "platform_get_irq failed: %d\n", ret);
+		dev_err(dev, "failed to get dispc irq: %d\n", ret);
 		goto err_modeset_cleanup;
 	}
 
@@ -211,6 +275,10 @@ err_runtime_suspend:
 err_disable_pm:
 	pm_runtime_disable(dev);
 
+err_fini_rdev:
+	if (tidss->rdev)
+		rpmsg_remotedev_put_device(tidss->rdev);
+
 	drm_dev_put(ddev);
 
 	return ret;

+ 6 - 0
drivers/gpu/drm/tidss/tidss_drv.h

@@ -8,11 +8,15 @@
 #define __TIDSS_DRV_H__
 
 #include <linux/spinlock.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
 
 struct tidss_device {
 	struct device *dev;		/* Underlying DSS device */
 	struct drm_device *ddev;	/* DRM device for DSS */
 
+	struct rpmsg_remotedev *rdev;
+	struct rpmsg_remotedev_display_resinfo rres;
+
 	struct drm_fbdev_cma *fbdev;
 
 	struct dispc_device *dispc;
@@ -21,7 +25,9 @@ struct tidss_device {
 	const struct tidss_features *features;
 
 	unsigned int num_crtcs;
+	unsigned int num_v_crtcs;
 	struct drm_crtc *crtcs[8];
+	struct drm_crtc *v_crtcs[8];
 
 	unsigned int num_planes;
 	struct drm_plane *planes[8];

+ 99 - 3
drivers/gpu/drm/tidss/tidss_kms.c

@@ -16,15 +16,33 @@
 #include "tidss_encoder.h"
 #include "tidss_kms.h"
 #include "tidss_plane.h"
+#include "tidss_v_display.h"
+
+static bool tidss_is_v_crtc(struct drm_crtc *crtc)
+{
+	int i;
+	struct drm_device *dev = crtc->dev;
+	struct tidss_device *tidss = dev->dev_private;
+
+	for (i = 0; i < tidss->num_v_crtcs; i++)
+		if (crtc == tidss->v_crtcs[i])
+			return true;
+	return false;
+}
 
 static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
 {
 	struct drm_device *ddev = old_state->dev;
 	struct tidss_device *tidss = ddev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	int i;
 
 	dev_dbg(ddev->dev, "%s\n", __func__);
 
-	tidss->dispc_ops->runtime_get(tidss->dispc);
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
+		if (!tidss_is_v_crtc(crtc))
+			tidss->dispc_ops->runtime_get(tidss->dispc);
 
 	drm_atomic_helper_commit_modeset_disables(ddev, old_state);
 	drm_atomic_helper_commit_planes(ddev, old_state, 0);
@@ -35,7 +53,9 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
 
 	drm_atomic_helper_cleanup_planes(ddev, old_state);
 
-	tidss->dispc_ops->runtime_put(tidss->dispc);
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i)
+		if (!tidss_is_v_crtc(crtc))
+			tidss->dispc_ops->runtime_put(tidss->dispc);
 }
 
 static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
@@ -53,6 +73,66 @@ static int tidss_modeset_init_properties(struct tidss_device *tidss)
 	return 0;
 }
 
+static struct drm_crtc *tidss_v_modeset_init_v_crtc(struct tidss_device *tidss, struct rpmsg_remotedev_display_disp *vp)
+{
+	struct drm_device *dev = tidss->ddev;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_plane *planes[8];
+	struct drm_crtc *crtc;
+	int last_crtc = tidss->num_crtcs + tidss->num_v_crtcs;
+	int num_planes = 0;
+	int p;
+
+	encoder = v_encoder_init(tidss, last_crtc, 1 << last_crtc, vp);
+	if (!encoder) {
+		dev_err(dev->dev, "could not create encoder: %u\n", last_crtc);
+		return NULL;
+	}
+
+	connector = v_connector_init(tidss, last_crtc, encoder, vp);
+	if (!connector) {
+		dev_err(dev->dev, "could not create connector: %u\n", last_crtc);
+		goto connector_fail;
+	}
+
+	for (p = 0; p < vp->num_pipes; p++) {
+		int plane_id = p << 8 | last_crtc;
+
+		planes[p] = v_plane_init(tidss, plane_id, (1 << last_crtc),
+				p == 0 ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY,
+				vp, p);
+		if (!planes[p]) {
+			dev_err(dev->dev, "could not create plane: %u\n", plane_id);
+			goto plane_fail;
+		}
+		num_planes++;
+	}
+
+	crtc = v_crtc_init(tidss, last_crtc, planes[0], vp);
+	if (!crtc) {
+		dev_err(dev->dev, "could not create crtc: %d\n", last_crtc);
+		goto plane_fail;
+	}
+
+	tidss->num_v_crtcs++;
+
+	return crtc;
+
+plane_fail:
+	for (p = 0; p < num_planes; p++)
+		v_plane_fini(tidss, planes[p]);
+	v_connector_fini(tidss, connector);
+connector_fail:
+	v_encoder_fini(tidss, encoder);
+	return NULL;
+}
+
+struct rpmsg_remotedev_display_cb tidss_rdev_cb  = {
+	.commit_done = v_crtc_commit_done,
+	.buffer_done = v_crtc_buffer_done,
+};
+
 int tidss_modeset_init(struct tidss_device *tidss)
 {
 	struct drm_device *ddev = tidss->ddev;
@@ -79,7 +159,20 @@ int tidss_modeset_init(struct tidss_device *tidss)
 	if (ret)
 		return ret;
 
-	ret = drm_vblank_init(ddev, tidss->num_crtcs);
+	if (tidss->rdev) {
+		tidss->rdev->device.display.ops->get_res_info(tidss->rdev, &tidss->rres);
+
+		if (tidss->rres.num_disps)
+			tidss->rdev->device.display.cb_ops = &tidss_rdev_cb;
+
+		for (i = 0; i < tidss->rres.num_disps; i++) {
+			tidss->v_crtcs[i] = tidss_v_modeset_init_v_crtc(tidss, &tidss->rres.disps[i]);
+			if (!tidss->v_crtcs[i])
+				return -ENOMEM;
+		}
+	}
+
+	ret = drm_vblank_init(ddev, tidss->num_crtcs + tidss->num_v_crtcs);
 	if (ret)
 		return ret;
 
@@ -87,6 +180,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
 	for (i = 0; i < tidss->num_crtcs; ++i)
 		drm_crtc_vblank_reset(tidss->crtcs[i]);
 
+	for (i = 0; i < tidss->num_v_crtcs; ++i)
+		drm_crtc_vblank_reset(tidss->v_crtcs[i]);
+
 	drm_mode_config_reset(ddev);
 
 	dev_dbg(tidss->dev, "%s done\n", __func__);

+ 144 - 0
drivers/gpu/drm/tidss/tidss_v_connector.c

@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+/*
+ * Virtual connectors : reuse connector_init logic from
+ * drivers/gpu/drm/bridge/panel.c
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "tidss_v_display.h"
+
+struct v_connector {
+	struct drm_connector base;
+	struct tidss_device *tidss;
+	int id;
+	struct drm_encoder *encoder;
+	struct drm_display_mode mode;
+};
+
+#define to_v_connector(x) container_of(x, struct v_connector, base)
+
+static int v_connector_get_modes(struct drm_connector *connector)
+{
+	struct v_connector *v_connector = to_v_connector(connector);
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode;
+	const struct drm_display_mode *m = &v_connector->mode;
+
+	dev_dbg(connector->dev->dev, "%s\n", __func__);
+
+	mode = drm_mode_duplicate(dev, m);
+	if (!mode) {
+		dev_err(dev->dev, "failed to add mode %ux%u@%u\n",
+				m->hdisplay, m->vdisplay, m->vrefresh);
+		return 0;
+	}
+
+	mode->type |= (DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED);
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_connector_helper_funcs v_connector_helper_funcs = {
+	.get_modes = v_connector_get_modes,
+};
+
+static void v_connector_destroy(struct drm_connector *connector)
+{
+	struct v_connector *v_connector = to_v_connector(connector);
+
+	dev_dbg(connector->dev->dev, "%s\n", __func__);
+
+	drm_connector_cleanup(connector);
+	kfree(v_connector);
+}
+
+static const struct drm_connector_funcs v_connector_funcs = {
+	.reset = drm_atomic_helper_connector_reset,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = v_connector_destroy,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+struct drm_connector *v_connector_init(struct tidss_device *tidss, int id,
+		struct drm_encoder *encoder, struct rpmsg_remotedev_display_disp *vp)
+{
+	struct drm_device *dev = tidss->ddev;
+	struct drm_connector *connector = NULL;
+	struct v_connector *v_connector;
+	unsigned int hfp, hbp, hsw, vfp, vbp, vsw;
+
+	v_connector = kzalloc(sizeof(struct v_connector), GFP_KERNEL);
+	if (!v_connector)
+		return NULL;
+
+	hfp = vp->width / 100;
+	if (!hfp)
+		hfp = 1;
+
+	hbp = hfp / 2;
+	if (!hbp)
+		hbp = 1;
+
+	hsw = hbp / 2;
+	if (!hsw)
+		hsw = 1;
+
+	vfp = vp->height / 100;
+	if (!vfp)
+		vfp = 1;
+
+	vbp = vfp / 2;
+	if (!vbp)
+		vbp = 1;
+
+	vsw = vbp / 2;
+	if (!vsw)
+		vsw = 1;
+
+	v_connector->tidss = tidss;
+
+	v_connector->mode.hdisplay = vp->width;
+	v_connector->mode.hsync_start = v_connector->mode.hdisplay + hfp;
+	v_connector->mode.hsync_end = v_connector->mode.hsync_start + hsw;
+	v_connector->mode.htotal = v_connector->mode.hsync_end + hbp;
+
+	v_connector->mode.vdisplay = vp->height;
+	v_connector->mode.vsync_start = v_connector->mode.vdisplay + vfp;
+	v_connector->mode.vsync_end = v_connector->mode.vsync_start + vsw;
+	v_connector->mode.vtotal = v_connector->mode.vsync_end + vbp;
+
+	v_connector->mode.vrefresh = vp->refresh;
+
+	v_connector->mode.clock = (v_connector->mode.vtotal * v_connector->mode.htotal * v_connector->mode.vrefresh) / 1000;
+
+	v_connector->encoder = encoder;
+	v_connector->id = id;
+
+	connector = &v_connector->base;
+
+	drm_connector_init(dev, connector, &v_connector_funcs,
+				DRM_MODE_CONNECTOR_VIRTUAL);
+	drm_connector_helper_add(connector, &v_connector_helper_funcs);
+
+	drm_connector_attach_encoder(connector, encoder);
+
+	return connector;
+}
+
+void v_connector_fini(struct tidss_device *tidss, struct drm_connector *connector)
+{
+	v_connector_destroy(connector);
+}

+ 488 - 0
drivers/gpu/drm/tidss/tidss_v_crtc.c

@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+/*
+ * Virtual crtcs: reuse drivers/gpu/drm/tidss/tidss_crtc.c
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include "tidss_v_display.h"
+
+static DEFINE_MUTEX(out_buffers_lock);
+static LIST_HEAD(out_buffers_list);
+
+struct v_crtc {
+	struct drm_crtc base;
+	struct tidss_device *tidss;
+	int id;
+	unsigned int remote_id;
+
+	bool disable_in_progress;
+	bool enabled;
+	struct drm_pending_vblank_event *event;
+
+	struct mutex vsync_lock;
+
+	struct completion framedone_completion;
+	struct work_struct fake_vsync;
+
+};
+
+struct v_crtc_buffer_ref {
+	struct drm_framebuffer *fb;
+	atomic_t refcount;
+	struct list_head node;
+};
+
+#define to_v_crtc(x) container_of(x, struct v_crtc, base)
+
+static void v_crtc_finish_page_flip(struct drm_crtc *crtc)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+	event = v_crtc->event;
+	v_crtc->event = NULL;
+
+	if (!event) {
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		return;
+	}
+
+	drm_crtc_send_vblank_event(crtc, event);
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	drm_crtc_vblank_put(crtc);
+}
+
+void v_crtc_vblank_irq(struct drm_crtc *crtc)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+
+	drm_crtc_handle_vblank(crtc);
+
+	v_crtc_finish_page_flip(crtc);
+
+	mutex_lock(&v_crtc->vsync_lock);
+	if (v_crtc->disable_in_progress) {
+		v_crtc->disable_in_progress = false;
+		complete(&v_crtc->framedone_completion);
+	}
+	mutex_unlock(&v_crtc->vsync_lock);
+
+}
+
+void v_crtc_commit_done(struct rpmsg_remotedev_display_commit *commit, void *data)
+{
+	struct drm_crtc *crtc = commit->priv;
+
+	v_crtc_vblank_irq(crtc);
+
+	kfree(commit);
+}
+
+void v_crtc_buffer_done(struct rpmsg_remotedev_display_buffer *buffer, void *data)
+{
+	struct v_crtc_buffer_ref *ref = buffer->priv;
+
+	mutex_lock(&out_buffers_lock);
+	if (atomic_dec_and_test(&ref->refcount)) {
+		list_del(&ref->node);
+		kfree(ref);
+		// TODO release the fence here
+	}
+	mutex_unlock(&out_buffers_lock);
+
+	kfree(buffer);
+}
+
+static void fake_vsync_fn(struct work_struct *work)
+{
+	struct v_crtc *v_crtc = container_of(work, struct v_crtc, fake_vsync);
+	struct drm_crtc *crtc = &v_crtc->base;
+
+	v_crtc_vblank_irq(crtc);
+}
+
+
+static u32 v_crtc_update_plane_mask(struct drm_crtc *crtc, struct drm_crtc_state *old, struct drm_crtc_state *new)
+{
+	struct drm_plane *plane;
+	u32 update_plane_mask = 0;
+	u32 cont_u_planes = 0;
+	u32 new_planes = ~old->plane_mask & new->plane_mask;
+	u32 old_planes = ~new->plane_mask & old->plane_mask;
+	u32 cont_planes = old->plane_mask & new->plane_mask;
+
+	drm_for_each_plane_mask(plane, crtc->dev, cont_planes)
+		if (v_plane_update_needed(plane))
+			cont_u_planes |= (1 << drm_plane_index(plane));
+
+
+	update_plane_mask = new_planes | old_planes | cont_u_planes;
+
+	return update_plane_mask;
+}
+
+static struct rpmsg_remotedev_display_buffer *v_crtc_buffer_alloc_for_req(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		struct drm_plane_state *pstate)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+	struct rpmsg_remotedev_display_buffer *buffer;
+	struct v_crtc_buffer_ref *ref, *buffer_ref = NULL;
+	int cnt;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return NULL;
+
+
+	mutex_lock(&out_buffers_lock);
+	list_for_each_entry(ref, &out_buffers_list, node) {
+		if (ref->fb == fb) {
+			atomic_inc(&ref->refcount);
+			buffer_ref = ref;
+			break;
+		}
+	}
+	if (buffer_ref)
+		goto found;
+
+	buffer_ref = kzalloc(sizeof(*buffer_ref), GFP_KERNEL);
+	if (!buffer_ref) {
+		mutex_unlock(&out_buffers_lock);
+		return NULL;
+	}
+	buffer_ref->fb = fb;
+	atomic_set(&buffer_ref->refcount, 1);
+	// TODO acquire fence here
+	list_add(&buffer_ref->node, &out_buffers_list);
+
+found:
+	mutex_unlock(&out_buffers_lock);
+
+	buffer->width = pstate->src_w >> 16;
+	buffer->height = pstate->src_h >> 16;
+	buffer->format = fb->format->format;
+	buffer->num_planes = min_t(u32, fb->format->num_planes, RPMSG_REMOTEDEV_DISPLAY_MAX_PLANES);
+	WARN_ON(buffer->num_planes != fb->format->num_planes);
+
+	for (cnt = 0; cnt < buffer->num_planes; cnt++) {
+		struct drm_gem_cma_object *gem;
+
+		gem = drm_fb_cma_get_gem_obj(fb, cnt);
+		buffer->planes[cnt] = gem->paddr + fb->offsets[cnt] +
+			(pstate->src_x >> 16) * fb->format->cpp[cnt] +
+			(pstate->src_y >> 16) * fb->pitches[cnt];
+		buffer->pitches[cnt] = fb->pitches[cnt];
+	}
+
+	buffer->rdev = v_crtc->tidss->rdev;
+	buffer->priv = buffer_ref;
+
+	return buffer;
+}
+
+static bool v_crtc_plane_state_to_vid_update_info(struct drm_crtc *crtc, struct drm_plane *plane,
+		struct rpmsg_remotedev_display_pipe_update *vid)
+{
+	struct drm_plane_state *pstate;
+
+	if (WARN_ON(!plane || !plane->state))
+		return false;
+
+	pstate = plane->state;
+
+	vid->pipe_id = v_plane_get_remote_id(plane);
+
+	if (!pstate->fb) {
+		vid->enabled = false;
+		dev_dbg(plane->dev->dev, "%s: disabling plane 0x%x\n", __func__, v_plane_get_remote_id(plane));
+	} else {
+		struct drm_framebuffer *fb = pstate->fb;
+		struct rpmsg_remotedev_display_buffer *buffer;
+
+		buffer = v_crtc_buffer_alloc_for_req(crtc, fb, pstate);
+		if (!buffer) {
+			dev_err(plane->dev->dev, "%s: could not allocate buffer\n", __func__);
+			return false;
+		}
+
+		if (WARN_ON(!fb->format))
+			return false;
+
+		vid->enabled = true;
+		vid->dst_w = pstate->crtc_w;
+		vid->dst_h = pstate->crtc_h;
+		vid->dst_x = pstate->crtc_x;
+		vid->dst_y = pstate->crtc_y;
+
+		vid->buffer = buffer;
+		dev_dbg(plane->dev->dev, "%s: updating plane 0x%x\n", __func__, v_plane_get_remote_id(plane));
+
+	}
+
+	return true;
+}
+
+
+static void v_crtc_flush_to_remote(struct drm_crtc *crtc, u32 planes, char *stage)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+	struct drm_plane *plane;
+	int i = 0;
+	struct rpmsg_remotedev_display_commit *commit;
+
+	if (!planes)
+		return;
+
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (!commit)
+		return;
+
+	dev_dbg(crtc->dev->dev, "%s: flushing out vp in %s = 0x%x\n", __func__, stage, v_crtc->remote_id);
+	drm_for_each_plane_mask(plane, crtc->dev, planes) {
+		bool ret;
+
+		if (WARN_ON(i >= RPMSG_REMOTEDEV_DISPLAY_MAX_PIPES))
+			continue;
+		ret = v_crtc_plane_state_to_vid_update_info(crtc, plane, &commit->pipes[i]);
+		if (!ret) {
+			dev_err(crtc->dev->dev, "%s: error creating vid commit req [%d]\n", __func__, i);
+			return;
+		}
+		i++;
+	}
+
+	commit->disp_id = v_crtc->remote_id;
+	commit->num_pipe_updates = i;
+	commit->priv = crtc;
+	commit->rdev = v_crtc->tidss->rdev;
+
+	v_crtc->tidss->rdev->device.display.ops->commit(v_crtc->tidss->rdev, commit);
+}
+
+static int v_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static void v_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+}
+
+static enum drm_mode_status v_crtc_mode_valid(struct drm_crtc *crtc,
+					   const struct drm_display_mode *mode)
+{
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	return MODE_OK;
+}
+
+static void v_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_state)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+	u32 upd_planes;
+
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	WARN_ON(!crtc->state->event);
+
+	/* Turn vertical blanking interrupt reporting on. */
+	drm_crtc_vblank_on(crtc);
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+	v_crtc->enabled = true;
+
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		v_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+
+	upd_planes = v_crtc_update_plane_mask(crtc, old_state, crtc->state);
+	if (upd_planes)
+		v_crtc_flush_to_remote(crtc, upd_planes, "enable");
+	else
+		schedule_work(&v_crtc->fake_vsync);
+}
+
+static void v_crtc_atomic_disable(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_state)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+	u32 upd_planes;
+
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	reinit_completion(&v_crtc->framedone_completion);
+
+	WARN_ON(!crtc->state->event);
+
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+	mutex_lock(&v_crtc->vsync_lock);
+	v_crtc->disable_in_progress = true;
+	mutex_unlock(&v_crtc->vsync_lock);
+
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		v_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+
+	upd_planes = v_crtc_update_plane_mask(crtc, old_state, crtc->state);
+	if (upd_planes)
+		v_crtc_flush_to_remote(crtc, upd_planes, "disable");
+	else
+		schedule_work(&v_crtc->fake_vsync);
+
+	if (!wait_for_completion_timeout(&v_crtc->framedone_completion,
+					 msecs_to_jiffies(500)))
+		dev_err(crtc->dev->dev, "Timeout waiting for disable complete on v_crtc %d",
+			v_crtc->remote_id);
+
+	v_crtc->enabled = false;
+	drm_crtc_vblank_off(crtc);
+}
+
+
+static void v_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+	u32 upd_planes;
+
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	/* Only flush the CRTC if it is currently enabled. */
+	if (!v_crtc->enabled)
+		return;
+
+	/* TODO : check if the old frame is still there */
+
+	// I think we always need the event to signal flip done
+	WARN_ON(!crtc->state->event);
+
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+	spin_lock_irq(&crtc->dev->event_lock);
+
+	if (crtc->state->event) {
+		v_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+	}
+
+	spin_unlock_irq(&crtc->dev->event_lock);
+
+	upd_planes = v_crtc_update_plane_mask(crtc, old_crtc_state, crtc->state);
+	if (upd_planes)
+		v_crtc_flush_to_remote(crtc, upd_planes, "flush");
+	else
+		schedule_work(&v_crtc->fake_vsync);
+}
+
+
+static const struct drm_crtc_helper_funcs v_crtc_helper_funcs = {
+	.atomic_check = v_crtc_atomic_check,
+	.atomic_begin = v_crtc_atomic_begin,
+	.atomic_flush = v_crtc_atomic_flush,
+	.atomic_enable = v_crtc_atomic_enable,
+	.atomic_disable = v_crtc_atomic_disable,
+
+	.mode_valid = v_crtc_mode_valid,
+};
+
+static void v_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct v_crtc *v_crtc = to_v_crtc(crtc);
+
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	drm_crtc_cleanup(crtc);
+	kfree(v_crtc);
+}
+
+static int v_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static void v_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	dev_dbg(crtc->dev->dev, "%s\n", __func__);
+}
+
+static const struct drm_crtc_funcs v_crtc_funcs = {
+	.reset = drm_atomic_helper_crtc_reset,
+	.destroy = v_crtc_destroy,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank = v_crtc_enable_vblank,
+	.disable_vblank = v_crtc_disable_vblank,
+};
+
+struct drm_crtc *v_crtc_init(struct tidss_device *tidss, int id,
+		struct drm_plane *plane, struct rpmsg_remotedev_display_disp *vp)
+{
+	struct drm_device *dev = tidss->ddev;
+	struct drm_crtc *crtc = NULL;
+	struct v_crtc *v_crtc;
+	int ret;
+
+	v_crtc = kzalloc(sizeof(*v_crtc), GFP_KERNEL);
+	if (!v_crtc)
+		return NULL;
+
+	v_crtc->tidss = tidss;
+	v_crtc->id = id;
+	v_crtc->remote_id = vp->disp_id;
+	init_completion(&v_crtc->framedone_completion);
+	INIT_WORK(&v_crtc->fake_vsync, fake_vsync_fn);
+
+	mutex_init(&v_crtc->vsync_lock);
+
+	crtc = &v_crtc->base;
+
+	ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+					&v_crtc_funcs, "crtc-0x%x", id);
+	if (ret < 0)
+		goto err;
+
+	drm_crtc_helper_add(crtc, &v_crtc_helper_funcs);
+
+	return crtc;
+
+err:
+	kfree(v_crtc);
+	return NULL;
+}
+
+void v_crtc_fini(struct tidss_device *tidss, struct drm_crtc *crtc)
+{
+	v_crtc_destroy(crtc);
+}
+

+ 31 - 0
drivers/gpu/drm/tidss/tidss_v_display.h

@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __TIDSS_V_DISPLAY_H__
+#define __TIDSS_V_DISPLAY_H__
+
+#include "tidss_drv.h"
+
+struct drm_encoder *v_encoder_init(struct tidss_device *tidss, int id,
+		int crtc_mask, struct rpmsg_remotedev_display_disp *vp);
+struct drm_connector *v_connector_init(struct tidss_device *tidss, int id,
+		struct drm_encoder *encoder, struct rpmsg_remotedev_display_disp *vp);
+struct drm_plane *v_plane_init(struct tidss_device *tidss, int id,
+		int crtc_bitmask, enum drm_plane_type type, struct rpmsg_remotedev_display_disp *vp, int index);
+struct drm_crtc *v_crtc_init(struct tidss_device *tidss, int id,
+		struct drm_plane *plane, struct rpmsg_remotedev_display_disp *vp);
+
+void v_crtc_fini(struct tidss_device *tidss, struct drm_crtc *crtc);
+void v_plane_fini(struct tidss_device *tidss, struct drm_plane *plane);
+void v_connector_fini(struct tidss_device *tidss, struct drm_connector *connector);
+void v_encoder_fini(struct tidss_device *tidss, struct drm_encoder *encoder);
+
+unsigned int v_plane_get_remote_id(struct drm_plane *plane);
+bool v_plane_update_needed(struct drm_plane *plane);
+
+void v_crtc_commit_done(struct rpmsg_remotedev_display_commit *commit, void *data);
+void v_crtc_buffer_done(struct rpmsg_remotedev_display_buffer *buffer, void *data);
+#endif

+ 101 - 0
drivers/gpu/drm/tidss/tidss_v_encoder.c

@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+/*
+ * Virtual encoders : reuse drivers/gpu/drm/tidss/tidss_encoder.c
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "tidss_v_display.h"
+
+struct v_encoder {
+	struct drm_encoder base;
+	struct tidss_device *tidss;
+	int id;
+	int possible_crtcs;
+};
+
+#define to_v_encoder(x) container_of(x, struct v_encoder, base)
+
+static void v_encoder_disable(struct drm_encoder *encoder)
+{
+	dev_dbg(encoder->dev->dev, "%s\n", __func__);
+}
+
+static void v_encoder_enable(struct drm_encoder *encoder)
+{
+	dev_dbg(encoder->dev->dev, "%s\n", __func__);
+}
+
+static int v_encoder_atomic_check(struct drm_encoder *encoder,
+				      struct drm_crtc_state *crtc_state,
+				      struct drm_connector_state *conn_state)
+{
+	dev_dbg(encoder->dev->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static void v_encoder_atomic_mode_set(struct drm_encoder *encoder,
+				   struct drm_crtc_state *crtc_state,
+				   struct drm_connector_state *conn_state)
+{
+	dev_dbg(encoder->dev->dev, "%s\n", __func__);
+}
+
+static const struct drm_encoder_helper_funcs v_encoder_helper_funcs = {
+	.atomic_mode_set = v_encoder_atomic_mode_set,
+	.disable = v_encoder_disable,
+	.enable = v_encoder_enable,
+	.atomic_check = v_encoder_atomic_check,
+};
+
+static void v_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct v_encoder *v_encoder = to_v_encoder(encoder);
+
+	dev_dbg(encoder->dev->dev, "%s\n", __func__);
+
+	drm_encoder_cleanup(encoder);
+	kfree(v_encoder);
+}
+
+static const struct drm_encoder_funcs v_encoder_funcs = {
+	.destroy = v_encoder_destroy,
+};
+
+struct drm_encoder *v_encoder_init(struct tidss_device *tidss, int id, int crtc_mask,
+		struct rpmsg_remotedev_display_disp *vp)
+{
+	struct drm_device *dev = tidss->ddev;
+	struct drm_encoder *encoder = NULL;
+	struct v_encoder *v_encoder;
+	int ret;
+
+	v_encoder = kzalloc(sizeof(*v_encoder), GFP_KERNEL);
+	if (!v_encoder)
+		return NULL;
+
+	v_encoder->tidss = tidss;
+	v_encoder->id = id;
+	v_encoder->possible_crtcs = crtc_mask;
+
+	encoder = &v_encoder->base;
+	encoder->possible_crtcs = crtc_mask;
+
+	ret = drm_encoder_init(dev, encoder, &v_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL, "encoder-0x%x", id);
+	if (ret < 0) {
+		kfree(v_encoder);
+		return NULL;
+	}
+	drm_encoder_helper_add(encoder, &v_encoder_helper_funcs);
+
+	return encoder;
+}
+
+void v_encoder_fini(struct tidss_device *tidss, struct drm_encoder *encoder)
+{
+	v_encoder_destroy(encoder);
+}

+ 276 - 0
drivers/gpu/drm/tidss/tidss_v_plane.c

@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+/*
+ * Virtual Planes: reuse drivers/gpu/drm/tidss/tidss_plane.c
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "tidss_v_display.h"
+
+#define V_PLANE_MAX_FORMATS (32)
+
+struct v_plane_state {
+	struct drm_plane_state state;
+	bool need_update;
+};
+
+struct v_plane {
+	struct drm_plane base;
+	struct tidss_device *tidss;
+	unsigned int remote_id;
+	int possible_crtcs;
+	int id;
+
+	int nformats;
+	u32 formats[V_PLANE_MAX_FORMATS];
+
+	bool can_scale;
+	bool can_mod_win;
+	int fx;
+	int fy;
+	int fw;
+	int fh;
+};
+
+#define to_v_plane(x) container_of(x, struct v_plane, base)
+#define to_v_plane_state(x) container_of(x, struct v_plane_state, state)
+
+unsigned int v_plane_get_remote_id(struct drm_plane *plane)
+{
+	struct v_plane *v_plane = to_v_plane(plane);
+
+	return v_plane->remote_id;
+}
+
+bool v_plane_update_needed(struct drm_plane *plane)
+{
+	struct v_plane_state *v_state = to_v_plane_state(plane->state);
+
+	return v_state->need_update;
+}
+
+static bool v_plane_state_changed(struct drm_plane_state *old, struct drm_plane_state *new)
+{
+	if (old->crtc != new->crtc)
+		return true;
+
+	if (old->fb != new->fb)
+		return true;
+
+	if (old->crtc_x != new->crtc_x)
+		return true;
+
+	if (old->crtc_y != new->crtc_y)
+		return true;
+
+	if (old->crtc_w != new->crtc_w)
+		return true;
+
+	if (old->crtc_h != new->crtc_h)
+		return true;
+
+	if (old->src_x != new->src_x)
+		return true;
+
+	if (old->src_y != new->src_y)
+		return true;
+
+	if (old->src_w != new->src_w)
+		return true;
+
+	if (old->src_h != new->src_h)
+		return true;
+
+	return false;
+}
+
+static int v_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	struct v_plane *v_plane = to_v_plane(plane);
+	struct drm_crtc_state *crtc_state;
+	int ret;
+
+	dev_dbg(plane->dev->dev, "%s\n", __func__);
+
+	if (!state->crtc) {
+		/*
+		 * The visible field is not reset by the DRM core but only
+		 * updated by drm_plane_helper_check_state(), so set it
+		 * manually.
+		 */
+		state->visible = false;
+		return 0;
+	}
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+						  0,
+						  INT_MAX,
+						  true, true);
+	if (ret < 0)
+		return ret;
+
+	if (v_plane->can_scale == false && (
+				((state->src_w >> 16) != state->crtc_w) ||
+				((state->src_h >> 16) != state->crtc_h)))
+		return -EINVAL;
+
+	if (v_plane->can_mod_win == false && (
+				state->crtc_x != v_plane->fx ||
+				state->crtc_y != v_plane->fy ||
+				state->crtc_w != v_plane->fw ||
+				state->crtc_h != v_plane->fh))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void v_plane_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *plane_state = plane->state;
+	struct v_plane_state *v_state = to_v_plane_state(plane_state);
+
+	dev_dbg(plane->dev->dev, "%s\n", __func__);
+
+	v_state->need_update = v_plane_state_changed(old_state, plane_state);
+}
+
+static void v_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *plane_state = plane->state;
+	struct v_plane_state *v_state = to_v_plane_state(plane_state);
+
+	dev_dbg(plane->dev->dev, "%s\n", __func__);
+
+	v_state->need_update = v_plane_state_changed(old_state, plane_state);
+}
+
+static const struct drm_plane_helper_funcs v_plane_helper_funcs = {
+	.atomic_check = v_plane_atomic_check,
+	.atomic_update = v_plane_atomic_update,
+	.atomic_disable = v_plane_atomic_disable,
+};
+
+static void v_plane_destroy(struct drm_plane *plane)
+{
+	struct v_plane *v_plane = to_v_plane(plane);
+
+	dev_dbg(plane->dev->dev, "%s\n", __func__);
+
+	drm_plane_cleanup(plane);
+	kfree(v_plane);
+}
+
+static struct drm_plane_state *
+v_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct v_plane_state *v_state;
+	struct drm_plane_state *state;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	v_state = kmalloc(sizeof(*v_state), GFP_KERNEL);
+	if (WARN_ON(!v_state))
+		return NULL;
+
+	state = &v_state->state;
+	__drm_atomic_helper_plane_duplicate_state(plane, &v_state->state);
+
+	v_state->need_update = false;
+
+	return state;
+}
+
+static void v_plane_atomic_destroy_state(struct drm_plane *plane,
+					 struct drm_plane_state *state)
+{
+	struct v_plane_state *v_state = to_v_plane_state(state);
+
+	if (WARN_ON(!state))
+		return;
+
+	__drm_atomic_helper_plane_destroy_state(state);
+
+	kfree(v_state);
+}
+
+static const struct drm_plane_funcs v_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = drm_atomic_helper_plane_reset,
+	.destroy = v_plane_destroy,
+	.atomic_duplicate_state = v_plane_atomic_duplicate_state,
+	.atomic_destroy_state = v_plane_atomic_destroy_state,
+};
+
+struct drm_plane *v_plane_init(struct tidss_device *tidss,
+		int id, int crtc_bitmask,
+		enum drm_plane_type type,
+		struct rpmsg_remotedev_display_disp *vp, int index)
+{
+	struct drm_device *dev = tidss->ddev;
+	struct drm_plane *plane;
+	struct v_plane *v_plane;
+	int ret;
+	int fmt;
+	struct rpmsg_remotedev_display_pipe *vid = &vp->pipes[index];
+
+	if (vid->num_formats > V_PLANE_MAX_FORMATS)
+		return NULL;
+
+	v_plane = kzalloc(sizeof(*v_plane), GFP_KERNEL);
+	if (!v_plane)
+		return NULL;
+
+	v_plane->tidss = tidss;
+	v_plane->id = id;
+	v_plane->remote_id = vid->pipe_id;
+	v_plane->can_scale = vid->can_scale;
+	v_plane->can_mod_win = vid->can_mod_win;
+	if (!v_plane->can_mod_win) {
+		v_plane->fx = vid->fixed_win_x;
+		v_plane->fy = vid->fixed_win_y;
+		v_plane->fw = vid->fixed_win_w;
+		v_plane->fh = vid->fixed_win_h;
+	}
+	v_plane->possible_crtcs = crtc_bitmask;
+	v_plane->nformats = vid->num_formats;
+
+	for (fmt = 0; fmt < vid->num_formats; fmt++)
+		v_plane->formats[fmt] = vid->formats[fmt];
+
+	plane = &v_plane->base;
+
+	ret = drm_universal_plane_init(dev, plane, crtc_bitmask,
+				       &v_plane_funcs, v_plane->formats,
+				       v_plane->nformats, NULL, type, "plane-0x%x", id);
+	if (ret < 0)
+		goto error;
+
+	drm_plane_helper_add(plane, &v_plane_helper_funcs);
+
+	drm_plane_create_zpos_immutable_property(plane, vid->initial_zorder);
+
+	return plane;
+
+error:
+	kfree(v_plane);
+	return NULL;
+}
+
+void v_plane_fini(struct tidss_device *tidss, struct drm_plane *plane)
+{
+	v_plane_destroy(plane);
+}

+ 25 - 0
drivers/rpmsg-kdrv/Kconfig

@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Rpmsg virtual device drivers"
+
+# RPMSG always gets selected by whoever wants it
+config RPMSG_KDRV
+	tristate "RPMSG virtual device interface"
+	select RPMSG
+	help
+	  Say Y here to enable support for RPMSG-based remote devices, usually
+	  exported by firmware running the rpmsg and remote_device stacks.
+	  This feature provides the framework for para-virtualizing an entire
+	  hardware block or specific resources of a hardware block.
+
+
+config RPMSG_KDRV_DISPLAY
+	tristate "RPMSG virtual display device support"
+	select RPMSG_KDRV
+	help
+	  Say Y here to enable support for remote-device based display
+	  virtualization. This setup expects the display to be driven by a
+	  remote processor, with the DRM driver accessing display features
+	  through the remote_device framework.
+
+endmenu

+ 3 - 0
drivers/rpmsg-kdrv/Makefile

@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RPMSG_KDRV)		+= rpmsg_kdrv.o
+obj-$(CONFIG_RPMSG_KDRV_DISPLAY)	+= rpmsg_kdrv_display.o

+ 742 - 0
drivers/rpmsg-kdrv/rpmsg_kdrv.c

@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <linux/rpmsg.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+#include "shared/rpmsg-kdrv-transport.h"
+#include "rpmsg_kdrv_internal.h"
+
+struct rpmsg_kdrv_priv {
+	struct rpmsg_device *rpdev;
+
+	struct idr message_idr;
+	struct mutex message_lock;
+
+	int num_raw_devices;
+	struct rpmsg_kdrv_init_device_info raw_devices[RPMSG_KDRV_TP_MAX_DEVICES];
+	void *raw_device_data[RPMSG_KDRV_TP_MAX_DEVICES];
+	int raw_device_data_size[RPMSG_KDRV_TP_MAX_DEVICES];
+};
+
+struct rpmsg_kdrv_ctx {
+	struct rpmsg_device *rpdev;
+	bool wait_for_response;
+	request_cb_t callback;
+	void *cb_data;
+	bool response_recv;
+	struct wait_queue_head response_wq;
+
+	struct rpmsg_kdrv_device_header *dev_hdr;
+	void *req;
+	void *resp;
+	int req_size;
+	int resp_size;
+};
+
+static struct bus_type rpmsg_kdrv_bus;
+
+#define to_rpmsg_kdrv_device(d) container_of(d, struct rpmsg_kdrv_device, dev)
+#define to_rpmsg_kdrv_driver(d) container_of(d, struct rpmsg_kdrv_driver, drv)
+
+static int rpmsg_kdrv_match_id(struct device *dev, void *data)
+{
+	uint32_t *idptr = data;
+	struct rpmsg_kdrv_device *kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+	if (kddev->device_id == *idptr)
+		return 1;
+	return 0;
+}
+
+static int rpmsg_kdrv_match_remotedev(struct device *dev, void *data)
+{
+	struct rpmsg_remotedev *rdev = data;
+	struct rpmsg_kdrv_device *kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+	if (kddev->remotedev == rdev)
+		return 1;
+	return 0;
+}
+
+static int rpmsg_kdrv_match_name(struct device *dev, void *data)
+{
+	const char *name = data;
+	struct rpmsg_kdrv_device *kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+	if (strcmp(kddev->device_name, name) == 0)
+		return 1;
+	return 0;
+}
+
+int rpmsg_kdrv_register_driver(struct rpmsg_kdrv_driver *drv)
+{
+	int ret;
+
+	drv->drv.bus = &rpmsg_kdrv_bus;
+	drv->drv.owner = THIS_MODULE;
+
+	ret = driver_register(&drv->drv);
+	if (ret)
+		pr_err("%s: driver_register failed\n", __func__);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_register_driver);
+
+static void rpmsg_kdrv_driver_handle_data(struct rpmsg_device *rpdev, void *data, int len, void *private, u32 src)
+{
+	struct device *dev;
+	struct rpmsg_kdrv_device_header *hdr = data;
+	struct rpmsg_kdrv_device *kddev = NULL;
+	struct rpmsg_kdrv_driver *kddrv = NULL;
+	void *message;
+	int message_size;
+	uint32_t msg_device_id;
+	int ret;
+
+	msg_device_id = hdr->device_id;
+	dev = bus_find_device(&rpmsg_kdrv_bus, NULL, &(msg_device_id), rpmsg_kdrv_match_id);
+	if (!dev) {
+		dev_err(&rpdev->dev, "%s: message received for unknown device\n", __func__);
+		return;
+	}
+	kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+	kddrv = to_rpmsg_kdrv_driver(kddev->dev.driver);
+	if (!kddrv) {
+		dev_err(&rpdev->dev, "%s: message received for device with no driver\n", __func__);
+		return;
+	}
+
+	message = (void *)(&hdr[1]);
+	message_size = len - sizeof(*hdr);
+	ret = kddrv->callback(kddev, message, message_size);
+	if (ret)
+		dev_err(&rpdev->dev, "%s: message callback returns %d\n", __func__, ret);
+
+}
+
+static int rpmsg_kdrv_connect(struct rpmsg_device *rpdev, struct rpmsg_kdrv_device *kddev)
+{
+	int ret;
+	struct rpmsg_kdrv_init_connect_message *connect_req;
+
+	connect_req = devm_kzalloc(&rpdev->dev, sizeof(*connect_req), GFP_KERNEL);
+	if (!connect_req)
+		return -ENOMEM;
+
+	connect_req->header.message_type = RPMSG_KDRV_TP_INIT_CONNECT_MESSAGE;
+	connect_req->device_id = kddev->device_id;
+
+	ret = rpmsg_kdrv_send_message(rpdev, RPMSG_KDRV_TP_DEVICE_ID_INIT,
+			connect_req, sizeof(*connect_req));
+
+	devm_kfree(&rpdev->dev, connect_req);
+	return ret;
+}
+
+static int rpmsg_kdrv_disconnect(struct rpmsg_device *rpdev, struct rpmsg_kdrv_device *kddev)
+{
+	int ret;
+	struct rpmsg_kdrv_init_disconnect_message *disconnect_req;
+
+	disconnect_req = devm_kzalloc(&rpdev->dev, sizeof(*disconnect_req), GFP_KERNEL);
+	if (!disconnect_req)
+		return -ENOMEM;
+
+	disconnect_req->header.message_type = RPMSG_KDRV_TP_INIT_DISCONNECT_MESSAGE;
+	disconnect_req->device_id = kddev->device_id;
+
+	ret = rpmsg_kdrv_send_message(rpdev, RPMSG_KDRV_TP_DEVICE_ID_INIT,
+			disconnect_req, sizeof(*disconnect_req));
+
+	devm_kfree(&rpdev->dev, disconnect_req);
+	return ret;
+}
+
+struct rpmsg_remotedev *rpmsg_remotedev_get_named_device(const char *device_name)
+{
+	struct device *dev;
+	struct rpmsg_kdrv_device *kddev = NULL;
+
+	dev = bus_find_device(&rpmsg_kdrv_bus, NULL, (void *)device_name, rpmsg_kdrv_match_name);
+	if (!dev)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+	if (!kddev->remotedev)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	rpmsg_kdrv_connect(kddev->rpdev, kddev);
+
+	return kddev->remotedev;
+}
+EXPORT_SYMBOL(rpmsg_remotedev_get_named_device);
+
+void rpmsg_remotedev_put_device(struct rpmsg_remotedev *rdev)
+{
+	struct device *dev;
+	struct rpmsg_kdrv_device *kddev = NULL;
+
+	dev = bus_find_device(&rpmsg_kdrv_bus, NULL, (void *)rdev, rpmsg_kdrv_match_remotedev);
+	if (!dev) {
+		pr_err("%s: could not find device for remotedev\n", __func__);
+		return;
+	}
+
+	kddev = container_of(dev, struct rpmsg_kdrv_device, dev);
+
+	rpmsg_kdrv_disconnect(kddev->rpdev, kddev);
+}
+EXPORT_SYMBOL(rpmsg_remotedev_put_device);
+
+static void rpmsg_kdrv_release_device(struct device *dev)
+{
+	struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	devm_kfree(&kddev->rpdev->dev, kddev);
+}
+
+static struct rpmsg_kdrv_device *rpmsg_kdrv_device_create(struct rpmsg_device *rpdev, int index)
+{
+	struct rpmsg_kdrv_device *kddev = devm_kzalloc(&rpdev->dev, sizeof(*kddev), GFP_KERNEL);
+	struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+	struct rpmsg_kdrv_init_device_info *dev = &priv->raw_devices[index];
+	int ret;
+
+	if (!kddev) {
+		dev_err(&rpdev->dev, "%s: could not allocate kddev\n", __func__);
+		return NULL;
+	}
+
+	kddev->rpdev = rpdev;
+	kddev->device_id = dev->device_id;
+	kddev->device_type = dev->device_type;
+	kddev->device_data_len = priv->raw_device_data_size[index];
+	kddev->device_data = priv->raw_device_data[index];
+	kddev->device_name = devm_kstrdup(&rpdev->dev, dev->device_name, GFP_KERNEL);
+	if (!kddev->device_name) {
+		dev_err(&rpdev->dev, "%s: could not allocate device name\n", __func__);
+		devm_kfree(&rpdev->dev, kddev);
+		return NULL;
+	}
+
+	kddev->dev.parent = &rpdev->dev;
+	kddev->dev.release = rpmsg_kdrv_release_device;
+	kddev->dev.bus = &rpmsg_kdrv_bus;
+
+	dev_set_name(&kddev->dev, "rpmsg-kdrv-%u-%s", dev->device_id, dev->device_name);
+
+	ret = device_register(&kddev->dev);
+	if (ret) {
+		dev_err(&rpdev->dev, "%s: device_register failed: %d\n", __func__, ret);
+		put_device(&kddev->dev);
+		return NULL;
+	}
+	dev_dbg(&rpdev->dev, "%s: registered new device : %s\n", __func__, dev_name(&kddev->dev));
+
+	return kddev;
+}
+
+static int rpmsg_kdrv_get_devices_cb(void *cb_data, void *req, int req_sz, void *resp, int resp_sz)
+{
+	int i, cnt;
+	struct rpmsg_device *rpdev = cb_data;
+	struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+	struct rpmsg_kdrv_init_dev_info_response *info_resp = resp;
+	struct rpmsg_kdrv_init_device_info *dev;
+	int ret = 0;
+
+	if (info_resp->header.message_type != RPMSG_KDRV_TP_INIT_DEV_INFO_RESPONSE) {
+		dev_err(&rpdev->dev, "%s: wrong response type\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < info_resp->num_devices; i++) {
+		dev = &info_resp->devices[i];
+		cnt = priv->num_raw_devices;
+
+		priv->raw_device_data_size[cnt] = dev->device_data_len;
+		priv->raw_device_data[cnt] = devm_kzalloc(&rpdev->dev, dev->device_data_len, GFP_KERNEL);
+		if (!priv->raw_device_data[cnt])
+			goto out;
+		memcpy(priv->raw_device_data[cnt],
+				&info_resp->device_data[dev->device_data_offset],
+				dev->device_data_len);
+		memcpy(&priv->raw_devices[cnt], dev, sizeof(*dev));
+		priv->num_raw_devices++;
+
+		dev_dbg(&rpdev->dev, "new device: %s\n", dev->device_name);
+	}
+
+	for (i = 0; i < priv->num_raw_devices; i++)
+		rpmsg_kdrv_device_create(rpdev, i);
+
+out:
+	devm_kfree(&rpdev->dev, req);
+	return ret;
+}
+
+static int rpmsg_kdrv_get_devices(struct rpmsg_device *rpdev)
+{
+	int ret;
+	struct rpmsg_kdrv_init_dev_info_request *info_req;
+
+	info_req = devm_kzalloc(&rpdev->dev, sizeof(*info_req), GFP_KERNEL);
+	if (!info_req)
+		return -ENOMEM;
+
+	info_req->header.message_type = RPMSG_KDRV_TP_INIT_DEV_INFO_REQUEST;
+
+	ret = rpmsg_kdrv_send_request_with_callback(rpdev, RPMSG_KDRV_TP_DEVICE_ID_INIT,
+			info_req, sizeof(*info_req), rpdev, rpmsg_kdrv_get_devices_cb);
+	if (ret)
+		goto nosend;
+
+	return 0;
+
+nosend:
+	devm_kfree(&rpdev->dev, info_req);
+	return ret;
+}
+
+static void rpmsg_kdrv_del_packet_id(struct rpmsg_device *rpdev, int id)
+{
+	struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+
+	mutex_lock(&priv->message_lock);
+	idr_remove(&priv->message_idr, id);
+	mutex_unlock(&priv->message_lock);
+}
+
+static uint32_t rpmsg_kdrv_new_packet_id(struct rpmsg_device *rpdev, void *data)
+{
+	struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+	int id;
+
+	mutex_lock(&priv->message_lock);
+	id = idr_alloc(&priv->message_idr, data, RPMSG_KDRV_TP_PACKET_ID_FIRST, 0, GFP_KERNEL);
+	mutex_unlock(&priv->message_lock);
+
+	if (id < 0)
+		return 0;
+
+	return id;
+}
+
+static void rpmsg_kdrv_dev_hdr_delete(struct rpmsg_device *rpdev, struct rpmsg_kdrv_device_header *hdr)
+{
+	rpmsg_kdrv_del_packet_id(rpdev, hdr->packet_id);
+	devm_kfree(&rpdev->dev, hdr);
+}
+
+static struct rpmsg_kdrv_device_header *rpmsg_kdrv_dev_hdr_alloc(struct rpmsg_device *rpdev,
+		int device_id, int size, int pkt_type, int pkt_src, void *msg, int len, struct rpmsg_kdrv_ctx *ctx)
+{
+	struct rpmsg_kdrv_device_header *dev_hdr;
+	void *dst;
+
+	dev_hdr = devm_kzalloc(&rpdev->dev, size, GFP_KERNEL);
+	if (!dev_hdr)
+		return NULL;
+
+	dev_hdr->device_id = device_id;
+	dev_hdr->packet_type = pkt_type;
+	dev_hdr->packet_source = pkt_src;
+	dev_hdr->packet_size = size;
+	dev_hdr->packet_id = RPMSG_KDRV_TP_PACKET_ID_NONE;
+
+
+	dst = (void *)(&dev_hdr[1]);
+	memcpy(dst, msg, len);
+
+	if (pkt_type == RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE)
+		return dev_hdr;
+
+	dev_hdr->packet_id = rpmsg_kdrv_new_packet_id(rpdev, ctx);
+	if (!dev_hdr->packet_id) {
+		devm_kfree(&rpdev->dev, dev_hdr);
+		return NULL;
+	}
+
+	ctx->dev_hdr = dev_hdr;
+
+	return dev_hdr;
+}
+
+static struct rpmsg_kdrv_ctx *rpmsg_kdrv_ctx_alloc(struct rpmsg_device *rpdev, bool blocking,
+		request_cb_t callback, void *cb_data, void *req, int req_size, void *resp, int resp_size)
+{
+	struct rpmsg_kdrv_ctx *ctx;
+
+	ctx = devm_kzalloc(&rpdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return NULL;
+
+	ctx->rpdev = rpdev;
+	if (blocking) {
+		ctx->wait_for_response = true;
+		ctx->response_recv = false;
+		init_waitqueue_head(&ctx->response_wq);
+	} else {
+		ctx->wait_for_response = false;
+		ctx->callback = callback;
+	}
+
+	ctx->cb_data = cb_data;
+	ctx->req = req;
+	ctx->req_size = req_size;
+	ctx->resp = resp;
+	ctx->resp_size = resp_size;
+
+	return ctx;
+}
+
+static int rpmsg_kdrv_send_packet(struct rpmsg_device *rpdev, void *data, int len)
+{
+	return rpmsg_send(rpdev->ept, data, len);
+}
+
+/*
+ * rpmsg_kdrv_send_request_with_callback
+ *
+ * Send a message where
+ * a) the caller does not block
+ * b) the caller expects multiple responses
+ *
+ * The callback function must return
+ * a) RRMSG_KDRV_CALLBACK_DONE when no more responses are expected
+ * b) RRMSG_KDRV_CALLBACK_MORE when more responses are awaited
+ *
+ * The caller is expected to destroy the message when it does not
+ * expect any more responses.
+ */
+int rpmsg_kdrv_send_request_with_callback(struct rpmsg_device *rpdev, uint32_t device_id,
+		void *message, uint32_t message_size,
+		void *cb_data, request_cb_t callback)
+{
+	struct rpmsg_kdrv_device_header *dev_hdr;
+	int total_size = message_size + sizeof(*dev_hdr);
+	struct rpmsg_kdrv_ctx *ctx = NULL;
+	int ret;
+
+	ctx = rpmsg_kdrv_ctx_alloc(rpdev, false, callback, cb_data, message, message_size, NULL, 0);
+	if (!ctx) {
+		dev_err(&rpdev->dev, "%s: ctx allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	dev_hdr = rpmsg_kdrv_dev_hdr_alloc(rpdev, device_id, total_size,
+			RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+			RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+			message, message_size,
+			ctx);
+	if (!dev_hdr) {
+		dev_err(&rpdev->dev, "%s: device header allocation failed\n", __func__);
+		ret = -ENOMEM;
+		goto dev_hdr_fail;
+	}
+
+	ret = rpmsg_kdrv_send_packet(rpdev, dev_hdr, total_size);
+	if (ret) {
+		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+		goto nosend;
+	}
+
+	return 0;
+
+nosend:
+	rpmsg_kdrv_dev_hdr_delete(rpdev, dev_hdr);
+dev_hdr_fail:
+	devm_kfree(&rpdev->dev, ctx);
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_send_request_with_callback);
+
+/*
+ * rpmsg_kdrv_send_request_with_response
+ *
+ * Send a message where the caller will block for a response
+ *
+ * The caller is expected to destroy the message and response
+ * when this function returns.
+ */
+int rpmsg_kdrv_send_request_with_response(struct rpmsg_device *rpdev, uint32_t device_id,
+		void *message, uint32_t message_size,
+		void *response, uint32_t response_size)
+{
+	struct rpmsg_kdrv_device_header *dev_hdr;
+	int total_size = message_size + sizeof(*dev_hdr);
+	struct rpmsg_kdrv_ctx *ctx = NULL;
+	int ret;
+
+	ctx = rpmsg_kdrv_ctx_alloc(rpdev, true, NULL, NULL, message, message_size, response, response_size);
+	if (!ctx) {
+		dev_err(&rpdev->dev, "%s: ctx allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	dev_hdr = rpmsg_kdrv_dev_hdr_alloc(rpdev, device_id, total_size,
+			RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+			RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+			message, message_size,
+			ctx);
+	if (!dev_hdr) {
+		dev_err(&rpdev->dev, "%s: device header allocation failed\n", __func__);
+		ret = -ENOMEM;
+		goto dev_hdr_fail;
+	}
+
+	ret = rpmsg_kdrv_send_packet(rpdev, dev_hdr, total_size);
+	if (ret) {
+		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
+		goto nosend;
+	}
+
+	wait_event(ctx->response_wq, ctx->response_recv == true);
+
+nosend:
+	rpmsg_kdrv_dev_hdr_delete(rpdev, dev_hdr);
+dev_hdr_fail:
+	devm_kfree(&rpdev->dev, ctx);
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_send_request_with_response);
+
+/*
+ * rpmsg_kdrv_send_message
+ *
+ * Send a message and do not expect a response
+ *
+ * The caller is expected to destroy the message when
+ * this function returns.
+ */
+int rpmsg_kdrv_send_message(struct rpmsg_device *rpdev, uint32_t device_id,
+		void *message, uint32_t message_size)
+{
+	struct rpmsg_kdrv_device_header *dev_hdr;
+	int total_size = message_size + sizeof(*dev_hdr);
+	int ret;
+
+	/* We don't need a ctx for direct messages */
+
+	dev_hdr = rpmsg_kdrv_dev_hdr_alloc(rpdev, device_id, total_size,
+			RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE,
+			RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+			message, message_size,
+			NULL);
+	if (!dev_hdr) {
+		dev_err(&rpdev->dev, "%s: device header allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = rpmsg_kdrv_send_packet(rpdev, dev_hdr, total_size);
+	if (ret) {
+		dev_err(&rpdev->dev, "%s: rpmsg_send failed: %d\n", __func__, ret);
+		goto out;
+	}
+
+out:
+	rpmsg_kdrv_dev_hdr_delete(rpdev, dev_hdr);
+	return ret;
+}
+EXPORT_SYMBOL(rpmsg_kdrv_send_message);
+
+static int rpmsg_kdrv_cb(struct rpmsg_device *rpdev, void *data, int len,
+						void *private, u32 src)
+{
+	struct rpmsg_kdrv_priv *priv = dev_get_drvdata(&rpdev->dev);
+	struct rpmsg_kdrv_device_header *hdr = data;
+	struct rpmsg_kdrv_message_header *msg;
+	int msg_len;
+	struct rpmsg_kdrv_ctx *ctx;
+	int ret;
+
+	if (hdr->packet_type != RPMSG_KDRV_TP_PACKET_TYPE_RESPONSE) {
+		rpmsg_kdrv_driver_handle_data(rpdev, data, len, private, src);
+		return 0;
+	}
+
+	mutex_lock(&priv->message_lock);
+	ctx = idr_find(&priv->message_idr, hdr->packet_id);
+	mutex_unlock(&priv->message_lock);
+
+	if (!ctx) {
+		dev_err(&rpdev->dev, "%s: response received with no pending request\n", __func__);
+		return 0;
+	}
+
+	msg = (struct rpmsg_kdrv_message_header *)((void *)(&hdr[1]));
+	msg_len = len - sizeof(*hdr);
+
+	/* process callback if expected */
+	if (ctx->callback) {
+		ret = ctx->callback(ctx->cb_data, ctx->req, ctx->req_size, msg, msg_len);
+		if (ret == RRMSG_KDRV_CALLBACK_DONE) {
+			/* No need to keep the ctx alive */
+			rpmsg_kdrv_dev_hdr_delete(rpdev, ctx->dev_hdr);
+			devm_kfree(&rpdev->dev, ctx);
+		}
+		return 0;
+	}
+
+	/* copy the response and wake up caller, caller will destroy ctx & dev_hdr */
+	memcpy(ctx->resp, msg, min(msg_len, ctx->resp_size));
+
+	ctx->response_recv = true;
+	wake_up(&ctx->response_wq);
+
+	return 0;
+}
+
+static int rpmsg_kdrv_dev_match(struct device *dev, struct device_driver *drv)
+{
+	struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+	struct rpmsg_kdrv_driver *kddrv = to_rpmsg_kdrv_driver(drv);
+
+	if (kddrv->device_type == kddev->device_type) {
+		dev_dbg(dev, "%s: matching with driver %s\n", __func__, drv->name);
+		return 1;
+	}
+
+	dev_dbg(dev, "%s: does not match driver %s\n", __func__, drv->name);
+	return 0;
+}
+
+static int rpmsg_kdrv_dev_probe(struct device *dev)
+{
+	struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+	struct rpmsg_kdrv_driver *kddrv = to_rpmsg_kdrv_driver(kddev->dev.driver);
+	int ret;
+
+	dev_dbg(dev, "%s: probe\n", __func__);
+
+	ret = kddrv->probe(kddev);
+	if (ret) {
+		dev_err(dev, "%s: child probe failed\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rpmsg_kdrv_dev_remove(struct device *dev)
+{
+	struct rpmsg_kdrv_device *kddev = to_rpmsg_kdrv_device(dev);
+	struct rpmsg_kdrv_driver *kddrv = to_rpmsg_kdrv_driver(kddev->dev.driver);
+
+	dev_dbg(dev, "%s: remove\n", __func__);
+
+	kddrv->remove(kddev);
+	return 0;
+}
+
+static int rpmsg_kdrv_probe(struct rpmsg_device *rpdev)
+{
+	int ret;
+	struct rpmsg_kdrv_priv *priv;
+
+	dev_dbg(&rpdev->dev, "%s: probing rpmsg kdrv driver\n", __func__);
+
+	priv = devm_kzalloc(&rpdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(&rpdev->dev, priv);
+	priv->rpdev = rpdev;
+
+	idr_init(&priv->message_idr);
+	mutex_init(&priv->message_lock);
+
+	dev_dbg(&rpdev->dev, "%s: sending device info request\n", __func__);
+	ret = rpmsg_kdrv_get_devices(rpdev);
+	if (ret) {
+		dev_err(&rpdev->dev, "%s: error collecting device info\n", __func__);
+		goto out;
+	}
+
+	return 0;
+
+out:
+	dev_set_drvdata(&rpdev->dev, NULL);
+	devm_kfree(&rpdev->dev, priv);
+	return ret;
+}
+
+static void rpmsg_kdrv_remove(struct rpmsg_device *rpdev)
+{
+	dev_dbg(&rpdev->dev, "removing rpmsg kdrv driver\n");
+
+	/* TODO check for pending responses for any of the child devices */
+	/* TODO disconnect them all */
+}
+
+static struct bus_type rpmsg_kdrv_bus = {
+	.name		= "rpmsg_kdrv",
+	.match		= rpmsg_kdrv_dev_match,
+	.probe		= rpmsg_kdrv_dev_probe,
+	.remove		= rpmsg_kdrv_dev_remove,
+};
+
+static struct rpmsg_device_id rpmsg_kdrv_id_table[] = {
+	{ .name	= "rpmsg-kdrv" },
+	{ },
+};
+
+static struct rpmsg_driver rpmsg_kdrv = {
+	.drv.name	= "rpmsg-kdrv",
+	.id_table	= rpmsg_kdrv_id_table,
+	.probe		= rpmsg_kdrv_probe,
+	.callback	= rpmsg_kdrv_cb,
+	.remove		= rpmsg_kdrv_remove,
+};
+
+static int __init rpmsg_kdrv_init(void)
+{
+	int ret;
+
+	ret = bus_register(&rpmsg_kdrv_bus);
+	if (ret) {
+		pr_err("failed to register rpmsg kdrv bus: %d\n", ret);
+		goto out;
+	}
+
+	ret = register_rpmsg_driver(&rpmsg_kdrv);
+	if (ret) {
+		pr_err("failed to register rpmsg kdrv driver: %d\n", ret);
+		goto rpdrv_fail;
+	}
+
+	pr_debug("registered rpmsg kdrv driver\n");
+
+	return 0;
+
+rpdrv_fail:
+	bus_unregister(&rpmsg_kdrv_bus);
+out:
+	return ret;
+}
+module_init(rpmsg_kdrv_init);
+
+static void __exit rpmsg_kdrv_fini(void)
+{
+	pr_debug("unregistering rpmsg kdrv driver\n");
+
+	unregister_rpmsg_driver(&rpmsg_kdrv);
+	bus_unregister(&rpmsg_kdrv_bus);
+}
+module_exit(rpmsg_kdrv_fini);
+
+MODULE_AUTHOR("Subhajit Paul <subhajit_paul@ti.com>");
+MODULE_DESCRIPTION("TI Remote-device framework Driver");
+MODULE_LICENSE("GPL v2");

+ 473 - 0
drivers/rpmsg-kdrv/rpmsg_kdrv_display.c

@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_fourcc.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg-remotedev/rpmsg-remotedev.h>
+
+#include "shared/rpmsg-kdrv-transport-display.h"
+#include "rpmsg_kdrv_internal.h"
+
+#define RPMSG_KDRV_DISPLAY_RES_ID_FIRST		(0x10)
+
+struct rpmsg_kdrv_display_private {
+	struct rpmsg_kdrv_device *kddev;
+
+	struct rpmsg_remotedev rdev;
+
+	struct idr res_idr;
+	struct mutex res_lock;
+
+};
+
+static uint32_t check_min(uint32_t a, uint32_t b, int line)
+{
+	uint32_t res = min(a, b);
+
+	if (res != b) {
+		pr_err("Copy mismatch at Line %d\n", line);
+		WARN_ON(1);
+	}
+
+	return res;
+}
+
+static inline enum rpmsg_kdrv_display_format rpmsg_kdrv_display_fmt_to_rpmsg_fmt(uint32_t in_fmt)
+{
+	switch (in_fmt) {
+	case DRM_FORMAT_ARGB8888:
+		return RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888;
+	case DRM_FORMAT_XRGB8888:
+		return RPMSG_KDRV_TP_DISPLAY_FORMAT_XRGB8888;
+	default:
+		return RPMSG_KDRV_TP_DISPLAY_FORMAT_MAX;
+	}
+}
+
+static inline uint32_t rpmsg_kdrv_display_fmt_to_drm_fmt(uint32_t in_fmt)
+{
+	switch (in_fmt) {
+	case RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888:
+		return DRM_FORMAT_ARGB8888;
+	case RPMSG_KDRV_TP_DISPLAY_FORMAT_XRGB8888:
+		return DRM_FORMAT_XRGB8888;
+	default:
+		return 0;
+	}
+}
+
+static bool rpmsg_kdrv_display_ready(struct rpmsg_remotedev *rdev)
+{
+	struct rpmsg_kdrv_display_private *priv = container_of(rdev, struct rpmsg_kdrv_display_private, rdev);
+	struct rpmsg_kdrv_device *kddev = priv->kddev;
+	struct rpmsg_device *rpdev = kddev->rpdev;
+	struct rpmsg_kdrv_display_ready_query_request *req;
+	struct rpmsg_kdrv_display_ready_query_response *resp;
+	int ret;
+	bool retval;
+
+	req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return false;
+
+	resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		devm_kfree(&kddev->dev, req);
+		return false;
+	}
+
+	req->header.message_type = RPMSG_KDRV_TP_DISPLAY_READY_QUERY_REQUEST;
+
+	ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req), resp, sizeof(*resp));
+	if (ret) {
+		dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response failed\n", __func__);
+		retval = false;
+		goto out;
+	}
+
+	if (resp->header.message_type != RPMSG_KDRV_TP_DISPLAY_READY_QUERY_RESPONSE) {
+		dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+		retval = false;
+		goto out;
+	}
+
+	retval = resp->ready ? true : false;
+
+out:
+	devm_kfree(&kddev->dev, resp);
+	devm_kfree(&kddev->dev, req);
+	return retval;
+
+}
+
+static void rpmsg_kdrv_display_copy_vid_info(struct rpmsg_remotedev_display_pipe *dst, struct rpmsg_kdrv_display_vid_info *src)
+{
+	int cnt;
+	uint32_t out_fmt;
+
+	dst->pipe_id = src->id;
+	dst->can_scale = src->can_scale ? true : false;
+	dst->can_mod_win = src->mutable_window ? true : false;
+	if (dst->can_mod_win)
+		dst->fixed_win_x = dst->fixed_win_y = dst->fixed_win_w = dst->fixed_win_h = 0;
+	else {
+		dst->fixed_win_x = src->fixed_window_x;
+		dst->fixed_win_y = src->fixed_window_y;
+		dst->fixed_win_w = src->fixed_window_w;
+		dst->fixed_win_h = src->fixed_window_h;
+	}
+	dst->initial_zorder = src->init_zorder;
+	dst->num_formats = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_FORMATS, src->num_formats, __LINE__);
+
+	dst->num_allowed_zorders = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_ZORDERS, src->num_zorders, __LINE__);
+
+	for (cnt = 0; cnt < dst->num_formats; cnt++) {
+		out_fmt = rpmsg_kdrv_display_fmt_to_drm_fmt(src->format[cnt]);
+		WARN_ON(out_fmt == 0);
+		dst->formats[cnt] = out_fmt;
+	}
+
+	for (cnt = 0; cnt < dst->num_allowed_zorders; cnt++)
+		dst->allowed_zorders[cnt] = src->zorder[cnt];
+}
+
+static void rpmsg_kdrv_display_copy_vp_info(struct rpmsg_remotedev_display_disp *dst, struct rpmsg_kdrv_display_vp_info *src)
+{
+	int vidcnt;
+
+	dst->disp_id = src->id;
+	dst->width = src->width;
+	dst->height = src->height;
+	dst->refresh = src->refresh;
+	dst->num_pipes = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_PIPES, src->num_vids, __LINE__);
+
+	for (vidcnt = 0; vidcnt < dst->num_pipes; vidcnt++)
+		rpmsg_kdrv_display_copy_vid_info(&dst->pipes[vidcnt], &src->vid[vidcnt]);
+}
+
+static int rpmsg_kdrv_display_get_res(struct rpmsg_remotedev *rdev, struct rpmsg_remotedev_display_resinfo *res)
+{
+	struct rpmsg_kdrv_display_private *priv = container_of(rdev, struct rpmsg_kdrv_display_private, rdev);
+	struct rpmsg_kdrv_device *kddev = priv->kddev;
+	struct rpmsg_device *rpdev = kddev->rpdev;
+	struct rpmsg_kdrv_display_res_info_request *req;
+	struct rpmsg_kdrv_display_res_info_response *resp;
+	int ret, vpcnt;
+
+	req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		devm_kfree(&kddev->dev, req);
+		return -ENOMEM;
+	}
+
+	req->header.message_type = RPMSG_KDRV_TP_DISPLAY_RES_INFO_REQUEST;
+
+	ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req), resp, sizeof(*resp));
+	if (ret) {
+		dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response failed\n", __func__);
+		goto out;
+	}
+
+	if (resp->header.message_type != RPMSG_KDRV_TP_DISPLAY_RES_INFO_RESPONSE) {
+		dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	res->num_disps = check_min(RPMSG_REMOTEDEV_DISPLAY_MAX_DISPS, resp->num_vps, __LINE__);
+
+	for (vpcnt = 0; vpcnt < res->num_disps; vpcnt++)
+		rpmsg_kdrv_display_copy_vp_info(&res->disps[vpcnt], &resp->vp[vpcnt]);
+
+out:
+	devm_kfree(&kddev->dev, resp);
+	devm_kfree(&kddev->dev, req);
+	return ret;
+}
+
+static uint32_t rpmsg_kdrv_display_res_id_new(struct rpmsg_kdrv_device *kddev, void *data)
+{
+	struct rpmsg_kdrv_display_private *priv = kddev->driver_private;
+	int id;
+
+	mutex_lock(&priv->res_lock);
+	id = idr_alloc(&priv->res_idr, data, RPMSG_KDRV_DISPLAY_RES_ID_FIRST, 0, GFP_KERNEL);
+	mutex_unlock(&priv->res_lock);
+
+	if (id < 0)
+		return 0;
+
+	return id;
+}
+
+static void rpmsg_kdrv_display_free_res_id(struct rpmsg_kdrv_device *kddev, uint32_t id)
+{
+	struct rpmsg_kdrv_display_private *priv = kddev->driver_private;
+
+	mutex_lock(&priv->res_lock);
+	idr_remove(&priv->res_idr, id);
+	mutex_unlock(&priv->res_lock);
+}
+
+static void rpmsg_kdrv_free_request_res(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_commit_request *req)
+{
+	int i;
+
+	rpmsg_kdrv_display_free_res_id(kddev, req->commit_id);
+
+	for (i = 0; i < req->num_vid_updates; i++)
+		if (req->vid[i].enabled)
+			rpmsg_kdrv_display_free_res_id(kddev, req->vid[i].buffer.buffer_id);
+
+}
+
+static bool rpmsg_kdrv_display_copy_buffer(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_buffer_info *dst,
+		struct rpmsg_remotedev_display_buffer *src)
+{
+	int i;
+
+	dst->width = src->width;
+	dst->height = src->height;
+
+	dst->format = rpmsg_kdrv_display_fmt_to_rpmsg_fmt(src->format);
+	if (WARN_ON(dst->format == RPMSG_KDRV_TP_DISPLAY_FORMAT_MAX))
+		return false;
+
+	dst->num_planes = check_min(RPMSG_KDRV_TP_DISPLAY_MAX_PLANES, src->num_planes, __LINE__);
+	if (dst->num_planes != src->num_planes)
+		return false;
+
+	for (i = 0; i < dst->num_planes; i++) {
+		dst->plane[i] = (uint64_t)src->planes[i];
+		dst->pitch[i] = src->pitches[i];
+	}
+
+	dst->buffer_id = rpmsg_kdrv_display_res_id_new(kddev, src);
+	if (!dst->buffer_id)
+		return false;
+
+	return true;
+}
+
+static bool rpmsg_kdrv_display_copy_vid_commit(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_vid_update_info *dst,
+		struct rpmsg_remotedev_display_pipe_update *src)
+{
+	dst->id = src->pipe_id;
+	dst->enabled = src->enabled ? 1 : 0;
+	if (dst->enabled) {
+		dst->dst_w = src->dst_w;
+		dst->dst_h = src->dst_h;
+		dst->dst_x = src->dst_x;
+		dst->dst_y = src->dst_y;
+
+		if (!rpmsg_kdrv_display_copy_buffer(kddev, &dst->buffer, src->buffer))
+			return false;
+	}
+
+	return true;
+}
+
+static bool rpmsg_kdrv_display_copy_commit(struct rpmsg_kdrv_device *kddev, struct rpmsg_kdrv_display_commit_request *dst,
+		struct rpmsg_remotedev_display_commit *src)
+{
+	int i, copied_vids;
+
+	dst->id = src->disp_id;
+	dst->num_vid_updates = check_min(RPMSG_KDRV_TP_DISPLAY_MAX_VIDS, src->num_pipe_updates, __LINE__);
+
+	for (i = 0, copied_vids = 0; i < dst->num_vid_updates; i++, copied_vids++)
+		if (!rpmsg_kdrv_display_copy_vid_commit(kddev, &dst->vid[i], &src->pipes[i]))
+			goto free_vid_res;
+
+	dst->commit_id = rpmsg_kdrv_display_res_id_new(kddev, src);
+	if (!dst->commit_id)
+		goto free_vid_res;
+
+	return true;
+
+free_vid_res:
+	for (i = 0; i < copied_vids; i++)
+		if (dst->vid[i].enabled)
+			rpmsg_kdrv_display_free_res_id(kddev, dst->vid[i].buffer.buffer_id);
+	return false;
+
+}
+
+static int rpmsg_kdrv_display_commit(struct rpmsg_remotedev *rdev, struct rpmsg_remotedev_display_commit *commit)
+{
+	struct rpmsg_kdrv_display_private *priv = container_of(rdev, struct rpmsg_kdrv_display_private, rdev);
+	struct rpmsg_kdrv_device *kddev = priv->kddev;
+	struct rpmsg_device *rpdev = kddev->rpdev;
+	struct rpmsg_kdrv_display_commit_request *req;
+	struct rpmsg_kdrv_display_commit_response *resp;
+	int ret;
+
+	req = devm_kzalloc(&kddev->dev, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = devm_kzalloc(&kddev->dev, sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		devm_kfree(&kddev->dev, req);
+		return -ENOMEM;
+	}
+
+	req->header.message_type = RPMSG_KDRV_TP_DISPLAY_COMMIT_REQUEST;
+
+	if (!rpmsg_kdrv_display_copy_commit(kddev, req, commit)) {
+		dev_err(&kddev->dev, "%s: failed to copy commit request\n", __func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = rpmsg_kdrv_send_request_with_response(rpdev, kddev->device_id, req, sizeof(*req),
+			resp, sizeof(*resp));
+	if (ret) {
+		dev_err(&kddev->dev, "%s: rpmsg_kdrv_send_request_with_response failed\n", __func__);
+		goto nosend;
+	}
+
+
+	if (resp->header.message_type != RPMSG_KDRV_TP_DISPLAY_COMMIT_RESPONSE) {
+		dev_err(&kddev->dev, "%s: wrong response type\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = ((resp->status == 0) ? 0 : -EINVAL);
+	goto out;
+
+nosend:
+	rpmsg_kdrv_free_request_res(kddev, req);
+out:
+	devm_kfree(&kddev->dev, req);
+	devm_kfree(&kddev->dev, resp);
+	return ret;
+}
+
+
+static struct rpmsg_remotedev_display_ops disp_ops = {
+	.ready = rpmsg_kdrv_display_ready,
+	.get_res_info = rpmsg_kdrv_display_get_res,
+	.commit = rpmsg_kdrv_display_commit,
+};
+
+static void rpmsg_kdrv_display_device_init(struct rpmsg_kdrv_device *kddev, void *data, int len)
+{
+}
+
+static int rpmsg_kdrv_display_probe(struct rpmsg_kdrv_device *dev)
+{
+	struct rpmsg_kdrv_display_private *priv;
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->rdev.type = RPMSG_REMOTEDEV_DISPLAY_DEVICE;
+	priv->rdev.device.display.ops = &disp_ops;
+
+	mutex_init(&priv->res_lock);
+	idr_init(&priv->res_idr);
+
+	priv->kddev = dev;
+	dev->driver_private = priv;
+	dev->remotedev = &priv->rdev;
+
+	rpmsg_kdrv_display_device_init(dev, dev->device_data, dev->device_data_len);
+
+	return 0;
+}
+
+static void rpmsg_kdrv_display_remove(struct rpmsg_kdrv_device *dev)
+{
+	dev_dbg(&dev->dev, "%s\n", __func__);
+}
+
+static void rpmsg_kdrv_display_handle_commit(struct rpmsg_kdrv_device *dev, struct rpmsg_kdrv_display_commit_done_message *msg)
+{
+	struct rpmsg_kdrv_display_private *priv = dev->driver_private;
+	struct rpmsg_remotedev *rdev = &priv->rdev;
+	struct rpmsg_remotedev_display_commit *commit;
+
+	mutex_lock(&priv->res_lock);
+	commit = idr_find(&priv->res_idr, msg->commit_id);
+	idr_remove(&priv->res_idr, msg->commit_id);
+	mutex_unlock(&priv->res_lock);
+
+	if (!commit) {
+		dev_err(&dev->dev, "%s: no pending commit found\n", __func__);
+		return;
+	}
+
+	if (rdev->device.display.cb_ops && rdev->device.display.cb_ops->commit_done)
+		rdev->device.display.cb_ops->commit_done(commit, rdev->cb_data);
+}
+
+static void rpmsg_kdrv_display_handle_buffer(struct rpmsg_kdrv_device *dev, struct rpmsg_kdrv_display_buffer_done_message *msg)
+{
+	struct rpmsg_kdrv_display_private *priv = dev->driver_private;
+	struct rpmsg_remotedev *rdev = &priv->rdev;
+	struct rpmsg_remotedev_display_buffer *buffer;
+
+	mutex_lock(&priv->res_lock);
+	buffer = idr_find(&priv->res_idr, msg->buffer_id);
+	idr_remove(&priv->res_idr, msg->buffer_id);
+	mutex_unlock(&priv->res_lock);
+
+	if (!buffer) {
+		dev_err(&dev->dev, "%s: no pending buffer found\n", __func__);
+		return;
+	}
+
+	if (rdev->device.display.cb_ops && rdev->device.display.cb_ops->buffer_done)
+		rdev->device.display.cb_ops->buffer_done(buffer, rdev->cb_data);
+}
+
+static int rpmsg_kdrv_display_callback(struct rpmsg_kdrv_device *dev, void *msg, int len)
+{
+	struct rpmsg_kdrv_display_message_header *hdr = msg;
+
+	if (hdr->message_type == RPMSG_KDRV_TP_DISPLAY_COMMIT_DONE_MESSAGE)
+		rpmsg_kdrv_display_handle_commit(dev, msg);
+	else if (hdr->message_type == RPMSG_KDRV_TP_DISPLAY_BUFFER_DONE_MESSAGE)
+		rpmsg_kdrv_display_handle_buffer(dev, msg);
+
+	return 0;
+}
+
+
+static struct rpmsg_kdrv_driver rpmsg_kdrv_display = {
+	.drv.name = "rpmsg-kdrv-display",
+	.device_type = RPMSG_KDRV_TP_DEVICE_TYPE_DISPLAY,
+	.probe = rpmsg_kdrv_display_probe,
+	.remove = rpmsg_kdrv_display_remove,
+	.callback = rpmsg_kdrv_display_callback,
+};
+
+static int __init rpmsg_kdrv_display_driver_init(void)
+{
+	return rpmsg_kdrv_register_driver(&rpmsg_kdrv_display);
+}
+module_init(rpmsg_kdrv_display_driver_init);
+
+static void __exit rpmsg_kdrv_display_driver_fini(void)
+{
+	driver_unregister(&rpmsg_kdrv_display.drv);
+}
+module_exit(rpmsg_kdrv_display_driver_fini);
+
+MODULE_AUTHOR("Subhajit Paul <subhajit_paul@ti.com>");
+MODULE_DESCRIPTION("TI Remote-device Virtual Display Driver");
+MODULE_LICENSE("GPL v2");

+ 48 - 0
drivers/rpmsg-kdrv/rpmsg_kdrv_internal.h

@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_INTERNAL_H__
+#define __RPMSG_KDRV_INTERNAL_H__
+
+#define RRMSG_KDRV_CALLBACK_DONE		(0)
+#define RRMSG_KDRV_CALLBACK_MORE		(1)
+
+struct rpmsg_kdrv_device {
+	struct device dev;
+	struct rpmsg_device *rpdev;
+	int device_type;
+	int device_id;
+	void *device_data;
+	int device_data_len;
+	char *device_name;
+	void *device_private;
+	void *driver_private;
+	struct rpmsg_remotedev *remotedev;
+};
+
+struct rpmsg_kdrv_driver {
+	struct device_driver drv;
+	int device_type;
+	int (*probe)(struct rpmsg_kdrv_device *dev);
+	void (*remove)(struct rpmsg_kdrv_device *dev);
+	int (*callback)(struct rpmsg_kdrv_device *dev, void *msg, int len);
+};
+
+typedef int (*request_cb_t)(void *data, void *req, int req_sz, void *resp, int resp_sz);
+
+extern int rpmsg_kdrv_register_driver(struct rpmsg_kdrv_driver *drv);
+
+extern int rpmsg_kdrv_send_request_with_callback(struct rpmsg_device *rpdev,
+		uint32_t device_id, void *message, uint32_t message_size, void *cb_data,
+		request_cb_t callback);
+extern int rpmsg_kdrv_send_request_with_response(struct rpmsg_device *rpdev,
+		uint32_t device_id, void *message, uint32_t message_size,
+		void *response, uint32_t response_size);
+extern int rpmsg_kdrv_send_message(struct rpmsg_device *rpdev,
+		uint32_t device_id, void *message, uint32_t message_size);
+
+
+#endif

+ 78 - 0
drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-common.h

@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_COMMON_H__
+#define __RPMSG_KDRV_TRANSPORT_COMMON_H__
+
+/*
+ * Device types supported by RPMSG-KDRV framework
+ * Currently supported device types: display
+ * Planned future support for capture and i2c devices
+ */
+#define RPMSG_KDRV_TP_DEVICE_TYPE_INIT		(0x0)
+#define RPMSG_KDRV_TP_DEVICE_TYPE_DISPLAY	(0x1)
+#define RPMSG_KDRV_TP_DEVICE_TYPE_DEMO		(0x2)
+/* More device types here*/
+#define RPMSG_KDRV_TP_DEVICE_TYPE_MAX		(0x3)
+
+/*
+ * Maximum number of proxy devices per remotecore
+ */
+#define RPMSG_KDRV_TP_MAX_DEVICES		(2)
+
+/*
+ * Maximum length of proxy device name
+ */
+#define RPMSG_KDRV_TP_DEVICE_NAME_LEN		(32)
+
+/*
+ * Statically assigned device ID for init device
+ * Remote device framework dynamically assigns device
+ * IDs for other devices. All dynamically assigned IDs
+ * are greater than RPMSG_KDRV_TP_DEVICE_ID_INIT
+ */
+#define RPMSG_KDRV_TP_DEVICE_ID_INIT		(0)
+
+/*
+ * Packet IDs are assigned dynamically (for REQUEST packets)
+ * starting from RPMSG_KDRV_TP_PACKET_ID_FIRST
+ * For MESSAGE packets, framework can use RPMSG_KDRV_TP_PACKET_ID_NONE
+ */
+#define RPMSG_KDRV_TP_PACKET_ID_NONE		(0x10)
+#define RPMSG_KDRV_TP_PACKET_ID_FIRST		(RPMSG_KDRV_TP_PACKET_ID_NONE + 1)
+
+enum rpmsg_kdrv_packet_source {
+	RPMSG_KDRV_TP_PACKET_SOURCE_SERVER,
+	RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT,
+	RPMSG_KDRV_TP_PACKET_SOURCE_MAX,
+};
+
+enum rpmsg_kdrv_packet_type {
+	RPMSG_KDRV_TP_PACKET_TYPE_REQUEST,
+	RPMSG_KDRV_TP_PACKET_TYPE_RESPONSE,
+	RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE,
+	RPMSG_KDRV_TP_PACKET_TYPE_MAX,
+};
+
+/*RPMSG_KDRV message :
+ * => device_header
+ * => message_header : defined by each device type
+ * => request / response / message payload
+ */
+struct rpmsg_kdrv_device_header {
+	/* ID of device sending the packet */
+	u8 device_id;
+	/* enum: rpmsg_kdrv_packet_type */
+	u8 packet_type;
+	/* enum: rpmsg_kdrv_packet_source */
+	u8 packet_source;
+	/* dynamically assigned packet ID for response matching */
+	u32 packet_id;
+	/* size of packet */
+	u32 packet_size;
+} __packed;
+
+#endif
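
The device header above prefixes every packet on the wire: the device-specific message (its own message header plus payload) is simply appended after struct rpmsg_kdrv_device_header, as rpmsg_kdrv_dev_hdr_alloc() does in rpmsg_kdrv.c. A rough sketch of the client-side framing, not part of this patch, with struct my_msg standing in for any device-specific message:

/* Illustrative framing only; struct my_msg and dev_id are placeholders. */
static struct rpmsg_kdrv_device_header *my_frame_packet(struct device *dev,
		u8 dev_id, const struct my_msg *msg)
{
	struct rpmsg_kdrv_device_header *hdr;
	size_t total = sizeof(*hdr) + sizeof(*msg);

	hdr = devm_kzalloc(dev, total, GFP_KERNEL);
	if (!hdr)
		return NULL;

	hdr->device_id = dev_id;
	hdr->packet_type = RPMSG_KDRV_TP_PACKET_TYPE_MESSAGE;
	hdr->packet_source = RPMSG_KDRV_TP_PACKET_SOURCE_CLIENT;
	hdr->packet_size = total;
	/* requests get a real packet_id for response matching; messages do not */
	hdr->packet_id = RPMSG_KDRV_TP_PACKET_ID_NONE;

	memcpy(hdr + 1, msg, sizeof(*msg));	/* message header + payload follow */
	return hdr;
}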

+ 78 - 0
drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-demo.h

@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_DEMODEV_H__
+#define __RPMSG_KDRV_TRANSPORT_DEMODEV_H__
+
+#include "rpmsg-kdrv-transport-common.h"
+
+enum rpmsg_kdrv_demodev_message_type {
+	RPMSG_KDRV_TP_DEMODEV_PING_REQUEST,
+	RPMSG_KDRV_TP_DEMODEV_PING_RESPONSE,
+	RPMSG_KDRV_TP_DEMODEV_S2C_MESSAGE,
+	RPMSG_KDRV_TP_DEMODEV_C2S_MESSAGE,
+	RPMSG_KDRV_TP_DEMODEV_MAX,
+};
+
+/*
+ * Maximum length of demo device data
+ */
+#define RPMSG_KDRV_TP_DEMODEV_DEVICE_DATA_LEN	(32)
+
+/*
+ * Maximum length of demo device message data
+ */
+#define RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN	(128)
+
+/*
+ * per-device data for demo device
+ */
+struct rpmsg_kdrv_demodev_device_data {
+	/* identification string for the demo device */
+	u8 charString[RPMSG_KDRV_TP_DEMODEV_DEVICE_DATA_LEN];
+} __packed;
+
+/*
+ * message header for demo device
+ */
+struct rpmsg_kdrv_demodev_message_header {
+	/* enum: rpmsg_kdrv_demodev_message_type */
+	u8 message_type;
+} __packed;
+
+/* demo device ping request - always client to server */
+struct rpmsg_kdrv_demodev_ping_request {
+	/* message header */
+	struct rpmsg_kdrv_demodev_message_header header;
+	/* ping data */
+	u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+/* demo device ping response - always server to client */
+struct rpmsg_kdrv_demodev_ping_response {
+	/* message header */
+	struct rpmsg_kdrv_demodev_message_header header;
+	/* ping data */
+	u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+/* demo device server to client one-way message */
+struct rpmsg_kdrv_demodev_s2c_message {
+	/* message header */
+	struct rpmsg_kdrv_demodev_message_header header;
+	/* message data */
+	u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+/* demo device client to server one-way message */
+struct rpmsg_kdrv_demodev_c2s_message {
+	/* message header */
+	struct rpmsg_kdrv_demodev_message_header header;
+	/* message data */
+	u8 data[RPMSG_KDRV_TP_DEMODEV_MESSAGE_DATA_LEN];
+} __packed;
+
+#endif
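
The demo transport above has no in-kernel client in this patch. As an illustration of how the rpmsg_kdrv_driver interface from rpmsg_kdrv_internal.h would be used for it (the display driver earlier in this series follows the same shape), a minimal, hypothetical driver skeleton might look like:

/* Hypothetical skeleton, not part of this patch. */
static int rpmsg_kdrv_demo_probe(struct rpmsg_kdrv_device *dev)
{
	dev_dbg(&dev->dev, "%s\n", __func__);
	return 0;
}

static void rpmsg_kdrv_demo_remove(struct rpmsg_kdrv_device *dev)
{
	dev_dbg(&dev->dev, "%s\n", __func__);
}

static int rpmsg_kdrv_demo_callback(struct rpmsg_kdrv_device *dev, void *msg, int len)
{
	struct rpmsg_kdrv_demodev_message_header *hdr = msg;

	/* handle RPMSG_KDRV_TP_DEMODEV_S2C_MESSAGE and friends here */
	dev_dbg(&dev->dev, "message type %d, len %d\n", hdr->message_type, len);
	return 0;
}

static struct rpmsg_kdrv_driver rpmsg_kdrv_demo = {
	.drv.name = "rpmsg-kdrv-demo",
	.device_type = RPMSG_KDRV_TP_DEVICE_TYPE_DEMO,
	.probe = rpmsg_kdrv_demo_probe,
	.remove = rpmsg_kdrv_demo_remove,
	.callback = rpmsg_kdrv_demo_callback,
};

/* registered with rpmsg_kdrv_register_driver(&rpmsg_kdrv_demo) from module init */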

+ 223 - 0
drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport-display.h

@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_DISPLAY_H__
+#define __RPMSG_KDRV_TRANSPORT_DISPLAY_H__
+
+#include "rpmsg-kdrv-transport-common.h"
+
+/*
+ * Maximum number of planes per buffer
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_PLANES	(2)
+
+/*
+ * Maximum number of shared displays
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_VPS		(2)
+
+/*
+ * Maximum number of pipes per shared display
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_VIDS		(4)
+
+/*
+ * Maximum number of formats supported per pipe
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_FORMATS	(2)
+
+/*
+ * Maximum number of zorders supported per pipe
+ */
+#define RPMSG_KDRV_TP_DISPLAY_MAX_ZORDERS	(4)
+
+enum rpmsg_kdrv_display_format {
+	RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888,
+	RPMSG_KDRV_TP_DISPLAY_FORMAT_XRGB8888,
+	RPMSG_KDRV_TP_DISPLAY_FORMAT_MAX,
+};
+
+enum rpmsg_kdrv_display_message_type {
+	RPMSG_KDRV_TP_DISPLAY_READY_QUERY_REQUEST,
+	RPMSG_KDRV_TP_DISPLAY_READY_QUERY_RESPONSE,
+	RPMSG_KDRV_TP_DISPLAY_RES_INFO_REQUEST,
+	RPMSG_KDRV_TP_DISPLAY_RES_INFO_RESPONSE,
+	RPMSG_KDRV_TP_DISPLAY_COMMIT_REQUEST,
+	RPMSG_KDRV_TP_DISPLAY_COMMIT_RESPONSE,
+	RPMSG_KDRV_TP_DISPLAY_COMMIT_DONE_MESSAGE,
+	RPMSG_KDRV_TP_DISPLAY_BUFFER_DONE_MESSAGE,
+	RPMSG_KDRV_TP_DISPLAY_MAX,
+};
+
+/*
+ * per-device data for display device
+ */
+struct rpmsg_kdrv_display_device_data {
+	/* Does the device send all vsyncs? */
+	u8 periodic_vsync;
+	/*Does the device defer the use of buffers? */
+	u8 deferred_buffer_usage;
+} __packed;
+
+/*
+ * message header for display device
+ */
+struct rpmsg_kdrv_display_message_header {
+	/* enum: rpmsg_kdrv_display_message_type */
+	u8 message_type;
+} __packed;
+
+/* display device request to provide ready / not-ready info */
+struct rpmsg_kdrv_display_ready_query_request {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+} __packed;
+
+/* display device response indicating ready / not-ready status */
+struct rpmsg_kdrv_display_ready_query_response {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+	/* ready status: 0 = not ready, 1 = ready */
+	u8 ready;
+} __packed;
+
+/* display device buffer update info */
+struct rpmsg_kdrv_display_buffer_info {
+	/* buffer width */
+	u16 width;
+	/* buffer height */
+	u16 height;
+	/* enum: rpmsg_kdrv_display_format */
+	u8 format;
+	/* number of planes */
+	u8 num_planes;
+	/* per plane start addresses */
+	u64 plane[RPMSG_KDRV_TP_DISPLAY_MAX_PLANES];
+	/* per plane pitch */
+	u16 pitch[RPMSG_KDRV_TP_DISPLAY_MAX_PLANES];
+	/* buffer id : to be used in buffer-done message */
+	u32 buffer_id;
+} __packed;
+
+/* display device pipe update info */
+struct rpmsg_kdrv_display_vid_update_info {
+	/* pipe ID */
+	u8 id;
+	/* enable / disable request */
+	u8 enabled;
+	/* window width */
+	u16 dst_w;
+	/* window height */
+	u16 dst_h;
+	/* window position X */
+	u16 dst_x;
+	/* window position Y */
+	u16 dst_y;
+	/* buffer */
+	struct rpmsg_kdrv_display_buffer_info buffer;
+} __packed;
+
+/* display device commit request */
+struct rpmsg_kdrv_display_commit_request {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+	/* ID of shared display */
+	u8 id;
+	/* number of pipe updates in the commit */
+	u8 num_vid_updates;
+	/* list of pipe updates */
+	struct rpmsg_kdrv_display_vid_update_info vid[RPMSG_KDRV_TP_DISPLAY_MAX_VIDS];
+	/* commit id: to be used in commit-done message */
+	u32 commit_id;
+} __packed;
+
+/* display device commit response */
+struct rpmsg_kdrv_display_commit_response {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+	/* commit id: from commit request */
+	u32 commit_id;
+	/* status: 0 = accepted, 1 = rejected */
+	u8 status;
+} __packed;
+
+/* display device commit done message */
+struct rpmsg_kdrv_display_commit_done_message {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+	/* commit id : from commit request */
+	u32 commit_id;
+} __packed;
+
+/* display device buffer deferred-release message */
+struct rpmsg_kdrv_display_buffer_done_message {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+	/* buffer id: from buffer_info */
+	u32 buffer_id;
+} __packed;
+
+/* display device request to provide list of shared resources */
+struct rpmsg_kdrv_display_res_info_request {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+} __packed;
+
+/* display device shared pipe */
+struct rpmsg_kdrv_display_vid_info {
+	/* pipe ID */
+	u8 id;
+	/* can the pipe's window be moved / resized on the display? */
+	u8 mutable_window;
+	/* fixed window position X, if applicable */
+	u16 fixed_window_x;
+	/* fixed window position Y, if applicable */
+	u16 fixed_window_y;
+	/* fixed window width, if applicable */
+	u16 fixed_window_w;
+	/* fixed window height, if applicable */
+	u16 fixed_window_h;
+	/* can pipe scale buffers? */
+	u8 can_scale;
+	/* number of formats supported */
+	u8 num_formats;
+	/* enum: rpmsg_kdrv_display_format */
+	u8 format[RPMSG_KDRV_TP_DISPLAY_MAX_FORMATS];
+	/* initial zorder of pipe */
+	u8 init_zorder;
+	/* number of allowed zorders */
+	u8 num_zorders;
+	/* list of allowed zorders */
+	u8 zorder[RPMSG_KDRV_TP_DISPLAY_MAX_ZORDERS];
+} __packed;
+
+/* display device shared display */
+struct rpmsg_kdrv_display_vp_info {
+	/* ID of shared display */
+	u8 id;
+	/* raster width */
+	u16 width;
+	/* raster height */
+	u16 height;
+	/* refresh rate */
+	u8 refresh;
+	/* number of pipes for this display */
+	u8 num_vids;
+	/* list of pipes */
+	struct rpmsg_kdrv_display_vid_info vid[RPMSG_KDRV_TP_DISPLAY_MAX_VIDS];
+} __packed;
+
+/* display device response providing list of shared resources */
+struct rpmsg_kdrv_display_res_info_response {
+	/* message header */
+	struct rpmsg_kdrv_display_message_header header;
+	/* number of shared displays */
+	u8 num_vps;
+	/* list of shared displays */
+	struct rpmsg_kdrv_display_vp_info vp[RPMSG_KDRV_TP_DISPLAY_MAX_VPS];
+} __packed;
+
+#endif
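As a rough illustration of how these messages fit together, the sketch below fills a commit request that enables one pipe with a single-plane ARGB8888 buffer. The buffer and commit ids are arbitrary client-chosen tokens that come back in the buffer-done and commit-done messages; everything else uses only the structures and enumerators defined in this header.

/* Illustrative sketch only: one pipe update on one shared display. */
static void display_fill_commit(struct rpmsg_kdrv_display_commit_request *req,
				u8 disp_id, u8 pipe_id, u64 paddr,
				u16 w, u16 h, u32 commit_id)
{
	struct rpmsg_kdrv_display_vid_update_info *vid = &req->vid[0];

	memset(req, 0, sizeof(*req));
	req->header.message_type = RPMSG_KDRV_TP_DISPLAY_COMMIT_REQUEST;
	req->id = disp_id;
	req->num_vid_updates = 1;
	req->commit_id = commit_id;

	vid->id = pipe_id;
	vid->enabled = 1;
	vid->dst_x = 0;
	vid->dst_y = 0;
	vid->dst_w = w;
	vid->dst_h = h;

	vid->buffer.width = w;
	vid->buffer.height = h;
	vid->buffer.format = RPMSG_KDRV_TP_DISPLAY_FORMAT_ARGB8888;
	vid->buffer.num_planes = 1;
	vid->buffer.plane[0] = paddr;
	vid->buffer.pitch[0] = w * 4;	/* 4 bytes per ARGB8888 pixel */
	vid->buffer.buffer_id = commit_id;	/* client-chosen token */
}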

+ 83 - 0
drivers/rpmsg-kdrv/shared/rpmsg-kdrv-transport.h

@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_KDRV_TRANSPORT_H__
+#define __RPMSG_KDRV_TRANSPORT_H__
+
+#include "rpmsg-kdrv-transport-common.h"
+
+enum rpmsg_kdrv_init_message_type {
+	RPMSG_KDRV_TP_INIT_DEV_INFO_REQUEST,
+	RPMSG_KDRV_TP_INIT_DEV_INFO_RESPONSE,
+	RPMSG_KDRV_TP_INIT_CONNECT_MESSAGE,
+	RPMSG_KDRV_TP_INIT_DISCONNECT_MESSAGE,
+	RPMSG_KDRV_TP_INIT_MAX,
+};
+
+/*
+ * message header for init device
+ */
+struct rpmsg_kdrv_init_message_header {
+	/* enum: rpmsg_kdrv_init_message_type */
+	u8 message_type;
+} __packed;
+
+/*
+ * init device request to provide list of devices
+ */
+struct rpmsg_kdrv_init_dev_info_request {
+	/* message header */
+	struct rpmsg_kdrv_init_message_header header;
+} __packed;
+
+struct rpmsg_kdrv_init_device_info {
+	/* device id */
+	u8 device_id;
+	/* device type (display, capture etc) */
+	u8 device_type;
+	/* name of device */
+	u8 device_name[RPMSG_KDRV_TP_DEVICE_NAME_LEN];
+	/* device specific info length */
+	u16 device_data_len;
+	/* per device-type info offset */
+	u16 device_data_offset;
+} __packed;
+
+/*
+ * init device response with list of devices
+ */
+struct rpmsg_kdrv_init_dev_info_response {
+	/* message header */
+	struct rpmsg_kdrv_init_message_header header;
+	/* number of exported devices */
+	u8 num_devices;
+	/* list of exported devices */
+	struct rpmsg_kdrv_init_device_info devices[RPMSG_KDRV_TP_MAX_DEVICES];
+	/* device specific data */
+	u8 device_data[0];
+} __packed;
+
+/*
+ * init device per-device connect message
+ */
+struct rpmsg_kdrv_init_connect_message {
+	/* message header */
+	struct rpmsg_kdrv_init_message_header header;
+	/* device ID to connect */
+	u8 device_id;
+} __packed;
+
+/*
+ * init device per-device disconnect message
+ */
+struct rpmsg_kdrv_init_disconnect_message {
+	/* message header */
+	struct rpmsg_kdrv_init_message_header header;
+	/* device ID to disconnect */
+	u8 device_id;
+} __packed;
+
+#endif
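The dev-info response packs per-device data after the fixed-size device list, and a client locates each blob through device_data_len and device_data_offset. A sketch of that walk, under the assumption that device_data_offset is relative to the start of the device_data[] array and that device_name is NUL-terminated:

/*
 * Illustrative sketch only. Assumes device_data_offset is an offset
 * into device_data[] and device_name is NUL-terminated.
 */
static void init_walk_devices(const struct rpmsg_kdrv_init_dev_info_response *rsp)
{
	int i;

	for (i = 0; i < rsp->num_devices && i < RPMSG_KDRV_TP_MAX_DEVICES; i++) {
		const struct rpmsg_kdrv_init_device_info *dev = &rsp->devices[i];
		const u8 *blob = rsp->device_data + dev->device_data_offset;

		pr_info("rpmsg-kdrv device %u: %s, %u bytes of device data\n",
			dev->device_id, dev->device_name, dev->device_data_len);

		/*
		 * For a display device, 'blob' would be interpreted as
		 * struct rpmsg_kdrv_display_device_data.
		 */
		(void)blob;
	}
}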

+ 117 - 0
include/linux/rpmsg-remotedev/rpmsg-remotedev.h

@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Subhajit Paul <subhajit_paul@ti.com>
+ */
+
+#ifndef __RPMSG_REMOTEDEV_H__
+#define __RPMSG_REMOTEDEV_H__
+
+#define RPMSG_REMOTEDEV_DISPLAY_MAX_PLANES		(3)
+#define RPMSG_REMOTEDEV_DISPLAY_MAX_DISPS		(8)
+#define RPMSG_REMOTEDEV_DISPLAY_MAX_PIPES		(8)
+#define RPMSG_REMOTEDEV_DISPLAY_MAX_FORMATS		(32)
+#define RPMSG_REMOTEDEV_DISPLAY_MAX_ZORDERS		(8)
+
+struct rpmsg_remotedev;
+
+struct rpmsg_remotedev_display_buffer {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	uint32_t num_planes;
+	dma_addr_t planes[RPMSG_REMOTEDEV_DISPLAY_MAX_PLANES];
+	uint32_t pitches[RPMSG_REMOTEDEV_DISPLAY_MAX_PLANES];
+	struct rpmsg_remotedev *rdev;
+	void *priv;
+};
+
+struct rpmsg_remotedev_display_pipe_update {
+	uint32_t pipe_id;
+	bool enabled;
+	uint32_t dst_w;
+	uint32_t dst_h;
+	uint32_t dst_x;
+	uint32_t dst_y;
+	struct rpmsg_remotedev_display_buffer *buffer;
+};
+
+struct rpmsg_remotedev_display_commit {
+	uint32_t disp_id;
+	uint32_t num_pipe_updates;
+	struct rpmsg_remotedev_display_pipe_update pipes[RPMSG_REMOTEDEV_DISPLAY_MAX_PIPES];
+	struct rpmsg_remotedev *rdev;
+	void *priv;
+};
+
+struct rpmsg_remotedev_display_pipe {
+	uint32_t pipe_id;
+	bool can_scale;
+	bool can_mod_win;
+	uint32_t fixed_win_x;
+	uint32_t fixed_win_y;
+	uint32_t fixed_win_w;
+	uint32_t fixed_win_h;
+	uint32_t initial_zorder;
+	uint32_t num_formats;
+	uint32_t formats[RPMSG_REMOTEDEV_DISPLAY_MAX_FORMATS];
+	uint32_t num_allowed_zorders;
+	uint32_t allowed_zorders[RPMSG_REMOTEDEV_DISPLAY_MAX_ZORDERS];
+};
+
+struct rpmsg_remotedev_display_disp {
+	uint32_t disp_id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t refresh;
+	uint32_t num_pipes;
+	struct rpmsg_remotedev_display_pipe pipes[RPMSG_REMOTEDEV_DISPLAY_MAX_PIPES];
+};
+
+struct rpmsg_remotedev_display_resinfo {
+	uint32_t num_disps;
+	struct rpmsg_remotedev_display_disp disps[RPMSG_REMOTEDEV_DISPLAY_MAX_DISPS];
+};
+
+struct rpmsg_remotedev_display_cb {
+	void (*commit_done)(struct rpmsg_remotedev_display_commit *commit, void *cb_data);
+	void (*buffer_done)(struct rpmsg_remotedev_display_buffer *buffer, void *cb_data);
+};
+
+struct rpmsg_remotedev_display_ops {
+	bool (*ready)(struct rpmsg_remotedev *rdev);
+	int (*get_res_info)(struct rpmsg_remotedev *rdev, struct rpmsg_remotedev_display_resinfo *res);
+	int (*commit)(struct rpmsg_remotedev *rdev, struct rpmsg_remotedev_display_commit *commit);
+};
+
+enum rpmsg_remotedev_type {
+	RPMSG_REMOTEDEV_DISPLAY_DEVICE,
+};
+
+struct rpmsg_remotedev {
+	enum rpmsg_remotedev_type type;
+	union {
+		struct {
+			struct rpmsg_remotedev_display_ops *ops;
+			struct rpmsg_remotedev_display_cb *cb_ops;
+		} display;
+	} device;
+	void *cb_data;
+
+};
+
+#if IS_REACHABLE(CONFIG_RPMSG_KDRV)
+extern struct rpmsg_remotedev *rpmsg_remotedev_get_named_device(const char *device_name);
+extern void rpmsg_remotedev_put_device(struct rpmsg_remotedev *rdev);
+#else
+static inline struct rpmsg_remotedev * __maybe_unused rpmsg_remotedev_get_named_device(const char *device_name)
+{
+	return NULL;
+}
+
+static inline void __maybe_unused rpmsg_remotedev_put_device(struct rpmsg_remotedev *rdev)
+{
+}
+#endif
+
+#endif
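A rough sketch of how a consumer such as tidss might use this API: look up the remote device announced by the firmware, check that it is a display device and ready, then query the shared resources it exports. The device name string is a placeholder (in practice it would come from the dss-remote DT property), and the check for a NULL return on lookup failure follows the stub above.

/* Illustrative sketch only; "j721e-display-remote" is a placeholder name. */
static int example_query_remote_display(void)
{
	struct rpmsg_remotedev *rdev;
	struct rpmsg_remotedev_display_resinfo *res;
	int ret = 0;

	rdev = rpmsg_remotedev_get_named_device("j721e-display-remote");
	if (!rdev)
		return -EPROBE_DEFER;	/* remote not announced yet */

	if (rdev->type != RPMSG_REMOTEDEV_DISPLAY_DEVICE ||
	    !rdev->device.display.ops->ready(rdev)) {
		ret = -ENODEV;
		goto out_put;
	}

	res = kzalloc(sizeof(*res), GFP_KERNEL);	/* too large for the stack */
	if (!res) {
		ret = -ENOMEM;
		goto out_put;
	}

	ret = rdev->device.display.ops->get_res_info(rdev, res);
	if (!ret)
		pr_info("remote exports %u shared display(s)\n", res->num_disps);

	kfree(res);
out_put:
	rpmsg_remotedev_put_device(rdev);
	return ret;
}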

+ 3 - 0
ti_config_fragments/audio_display.cfg

@@ -152,3 +152,6 @@ CONFIG_SND_SOC_TLV320AIC31XX=m
 CONFIG_SND_SOC_PCM3168A_I2C=m
 CONFIG_SND_SOC_HDMI_CODEC=m
 # sound - END
+
+# display sharing
+CONFIG_RPMSG_KDRV_DISPLAY=y