
Merge branch 'topic/bxt-stage1' into drm-intel-next-queued

Separate topic branch for bxt didn't work out since we needed to
refactor the gmbus code a bit to make it look decent. So backmerge.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Daniel Vetter, 10 years ago
commit c5fe557dde
100 files changed, 6057 insertions(+), 793 deletions(-)
  1. +3 -1      Documentation/devicetree/bindings/net/dsa/dsa.txt
  2. +7 -0      Documentation/devicetree/bindings/panel/ampire,am800480r3tmqwa1h.txt
  3. +7 -0      Documentation/devicetree/bindings/panel/auo,b101ean01.txt
  4. +7 -0      Documentation/devicetree/bindings/panel/innolux,at043tn24.txt
  5. +7 -0      Documentation/devicetree/bindings/panel/innolux,zj070na-01p.txt
  6. +7 -0      Documentation/devicetree/bindings/panel/ortustech,com43h4m85ulc.txt
  7. +7 -0      Documentation/devicetree/bindings/panel/samsung,ltn140at29-301.txt
  8. +7 -0      Documentation/devicetree/bindings/panel/shelly,sca07010-bfn-lnn.txt
  9. +2 -0      Documentation/devicetree/bindings/vendor-prefixes.txt
  10. +8 -0     Documentation/input/alps.txt
  11. +6 -0     Documentation/input/event-codes.txt
  12. +6 -3     Documentation/input/multi-touch-protocol.txt
  13. +14 -17   MAINTAINERS
  14. +1 -1     Makefile
  15. +1 -1     arch/powerpc/include/asm/cputhreads.h
  16. +5 -5     arch/x86/kernel/cpu/perf_event_intel.c
  17. +1 -0     arch/x86/kernel/early-quirks.c
  18. +15 -1    arch/x86/kernel/entry_64.S
  19. +1 -1     arch/x86/kernel/kgdb.c
  20. +10 -0    arch/x86/kernel/reboot.c
  21. +9 -1     arch/x86/xen/p2m.c
  22. +3 -3     block/blk-settings.c
  23. +13 -2    drivers/ata/libata-core.c
  24. +1 -0     drivers/dma/bcm2835-dma.c
  25. +7 -0     drivers/dma/dma-jz4740.c
  26. +7 -0     drivers/dma/edma.c
  27. +3 -1     drivers/dma/moxart-dma.c
  28. +1 -0     drivers/dma/omap-dma.c
  29. +7 -15    drivers/firmware/dmi_scan.c
  30. +1 -1     drivers/gpio/gpio-mpc8xxx.c
  31. +1 -1     drivers/gpio/gpio-syscon.c
  32. +10 -0    drivers/gpio/gpiolib-acpi.c
  33. +9 -0     drivers/gpu/drm/Kconfig
  34. +1 -0     drivers/gpu/drm/Makefile
  35. +7 -6     drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
  36. +12 -5    drivers/gpu/drm/amd/amdkfd/kfd_device.c
  37. +11 -7    drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
  38. +7 -10    drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
  39. +1 -11    drivers/gpu/drm/amd/amdkfd/kfd_module.c
  40. +7 -6     drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
  41. +8 -6     drivers/gpu/drm/amd/amdkfd/kfd_priv.h
  42. +6 -0     drivers/gpu/drm/amd/amdkfd/kfd_process.c
  43. +7 -5     drivers/gpu/drm/amd/amdkfd/kfd_topology.c
  44. +32 -32   drivers/gpu/drm/amd/include/kgd_kfd_interface.h
  45. +21 -0    drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
  46. +4 -15    drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
  47. +3 -0     drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
  48. +1 -0     drivers/gpu/drm/bochs/bochs_hw.c
  49. +11 -0    drivers/gpu/drm/bridge/Kconfig
  50. +1 -0     drivers/gpu/drm/bridge/Makefile
  51. +684 -0   drivers/gpu/drm/bridge/ps8622.c
  52. +1 -1     drivers/gpu/drm/bridge/ptn3460.c
  53. +22 -5    drivers/gpu/drm/drm_atomic.c
  54. +157 -43  drivers/gpu/drm/drm_atomic_helper.c
  55. +1 -1     drivers/gpu/drm/drm_bridge.c
  56. +5 -0     drivers/gpu/drm/drm_crtc.c
  57. +5 -4     drivers/gpu/drm/drm_crtc_helper.c
  58. +60 -20   drivers/gpu/drm/drm_dp_helper.c
  59. +13 -0    drivers/gpu/drm/drm_dp_mst_topology.c
  60. +1 -1     drivers/gpu/drm/drm_drv.c
  61. +1 -0     drivers/gpu/drm/drm_edid_load.c
  62. +1 -1     drivers/gpu/drm/drm_fb_cma_helper.c
  63. +38 -16   drivers/gpu/drm/drm_fb_helper.c
  64. +1 -0     drivers/gpu/drm/drm_info.c
  65. +1 -1     drivers/gpu/drm/drm_ioc32.c
  66. +28 -32   drivers/gpu/drm/drm_ioctl.c
  67. +7 -1     drivers/gpu/drm/drm_modes.c
  68. +1 -0     drivers/gpu/drm/drm_pci.c
  69. +6 -5     drivers/gpu/drm/drm_plane_helper.c
  70. +1 -0     drivers/gpu/drm/drm_probe_helper.c
  71. +1 -0     drivers/gpu/drm/drm_vm.c
  72. +3 -2     drivers/gpu/drm/exynos/exynos_drm_fbdev.c
  73. +5 -3     drivers/gpu/drm/exynos/exynos_drm_fimd.c
  74. +10 -7    drivers/gpu/drm/exynos/exynos_mixer.c
  75. +102 -95  drivers/gpu/drm/i2c/adv7511.c
  76. +120 -73  drivers/gpu/drm/i915/i915_debugfs.c
  77. +143 -110 drivers/gpu/drm/i915/i915_dma.c
  78. +14 -1    drivers/gpu/drm/i915/i915_drv.c
  79. +6 -1     drivers/gpu/drm/i915/i915_drv.h
  80. +13 -3    drivers/gpu/drm/i915/i915_gem_gtt.c
  81. +1 -1     drivers/gpu/drm/i915/i915_gem_stolen.c
  82. +14 -12   drivers/gpu/drm/i915/i915_reg.h
  83. +1 -1     drivers/gpu/drm/i915/intel_ddi.c
  84. +10 -1    drivers/gpu/drm/i915/intel_display.c
  85. +18 -1    drivers/gpu/drm/i915/intel_lrc.c
  86. +31 -2    drivers/gpu/drm/i915/intel_pm.c
  87. +19 -2    drivers/gpu/drm/i915/intel_ringbuffer.c
  88. +1 -1     drivers/gpu/drm/i915/intel_sprite.c
  89. +11 -0    drivers/gpu/drm/msm/Kconfig
  90. +5 -0     drivers/gpu/drm/msm/Makefile
  91. +212 -0   drivers/gpu/drm/msm/dsi/dsi.c
  92. +117 -0   drivers/gpu/drm/msm/dsi/dsi.h
  93. +376 -42  drivers/gpu/drm/msm/dsi/dsi.xml.h
  94. +1993 -0  drivers/gpu/drm/msm/dsi/dsi_host.c
  95. +705 -0   drivers/gpu/drm/msm/dsi/dsi_manager.c
  96. +352 -0   drivers/gpu/drm/msm/dsi/dsi_phy.c
  97. +34 -0    drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
  98. +258 -141 drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
  99. +97 -5    drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
  100. +17 -1   drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h

+ 3 - 1
Documentation/devicetree/bindings/net/dsa/dsa.txt

@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
 (DSA_MAX_SWITCHES).
 Each of these switch child nodes should have the following required properties:
 
-- reg			: Describes the switch address on the MII bus
+- reg			: Contains two fields. The first one describes the
+			  address on the MII bus. The second is the switch
+			  number that must be unique in cascaded configurations
 - #address-cells	: Must be 1
 - #size-cells		: Must be 0
 

+ 7 - 0
Documentation/devicetree/bindings/panel/ampire,am800480r3tmqwa1h.txt

@@ -0,0 +1,7 @@
+Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "ampire,am800480r3tmqwa1h"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 7 - 0
Documentation/devicetree/bindings/panel/auo,b101ean01.txt

@@ -0,0 +1,7 @@
+AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101ean01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 7 - 0
Documentation/devicetree/bindings/panel/innolux,at043tn24.txt

@@ -0,0 +1,7 @@
+Innolux AT043TN24 4.3" WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,at043tn24"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 7 - 0
Documentation/devicetree/bindings/panel/innolux,zj070na-01p.txt

@@ -0,0 +1,7 @@
+Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,zj070na-01p"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 7 - 0
Documentation/devicetree/bindings/panel/ortustech,com43h4m85ulc.txt

@@ -0,0 +1,7 @@
+OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
+
+Required properties:
+- compatible: should be "ortustech,com43h4m85ulc"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 7 - 0
Documentation/devicetree/bindings/panel/samsung,ltn140at29-301.txt

@@ -0,0 +1,7 @@
+Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,ltn140at29-301"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 7 - 0
Documentation/devicetree/bindings/panel/shelly,sca07010-bfn-lnn.txt

@@ -0,0 +1,7 @@
+Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "shelly,sca07010-bfn-lnn"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.

+ 2 - 0
Documentation/devicetree/bindings/vendor-prefixes.txt

@@ -17,6 +17,7 @@ altr	Altera Corp.
 amcc	Applied Micro Circuits Corporation (APM, formally AMCC)
 amd	Advanced Micro Devices (AMD), Inc.
 amlogic	Amlogic, Inc.
+ampire	Ampire Co., Ltd.
 ams	AMS AG
 amstaos	AMS-Taos Inc.
 apm	Applied Micro Circuits Corporation (APM)
@@ -132,6 +133,7 @@ nvidia	NVIDIA
 nxp	NXP Semiconductors
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
+ortustech	Ortus Technology Co., Ltd.
 ovti	OmniVision Technologies
 panasonic	Panasonic Corporation
 parade	Parade Technologies Inc.

+ 8 - 0
Documentation/input/alps.txt

@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
  byte 4:  0   y6   y5   y4   y3   y2   y1   y0
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
+Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+the DualPoint Stick.
+
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
 
@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
  byte 7:    0   y6   y5   y4   y3   y2   y1   y0
  byte 8:    0   z6   z5   z4   z3   z2   z1   z0
 
+Devices which use the interleaving format normally send standard PS/2 mouse
+packets for the DualPoint Stick + ALPS Absolute Mode packets for the
+touchpad, switching to the interleaved packet format when both the stick and
+the touchpad are used at the same time.
+
 ALPS Absolute Mode - Protocol Version 3
 ---------------------------------------
 

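For readers who have not decoded the stick traffic before: the "standard PS/2 mouse packets" mentioned above are the classic 3-byte protocol. A hedged sketch of the decode follows; the bit layout is from the generic PS/2 mouse spec, not from this document, and the helper name is made up:

	/*
	 * byte 0:  Yovf Xovf Ysgn Xsgn  1  M  R  L
	 * byte 1:  X movement (low 8 bits; sign bit lives in byte 0)
	 * byte 2:  Y movement (low 8 bits; sign bit lives in byte 0)
	 */
	static void ps2_mouse_decode(const u8 p[3], int *dx, int *dy, u8 *btns)
	{
		*btns = p[0] & 0x07;			/* L, R, M buttons */
		*dx = p[1] - ((p[0] & 0x10) ? 256 : 0);	/* apply X sign */
		*dy = p[2] - ((p[0] & 0x20) ? 256 : 0);	/* apply Y sign */
	}
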
+ 6 - 0
Documentation/input/event-codes.txt

@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
 The kernel does not provide button emulation for such devices but treats
 them as any other INPUT_PROP_BUTTONPAD device.
 
+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.

+ 6 - 3
Documentation/input/multi-touch-protocol.txt

@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
 
 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.
 
 ABS_MT_BLOB_ID
 

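As a quick illustration of the type B flow described above — a sketch, not code from this commit — a driver whose firmware classifies contacts routes the tool type through input_mt_report_slot_state() instead of emitting ABS_MT_TOOL_TYPE itself, and may legitimately re-report a contact as MT_TOOL_PALM after first calling it MT_TOOL_FINGER:

	#include <linux/input/mt.h>

	/* Hypothetical helper; slot/x/y/is_palm come from the device. */
	static void report_contact(struct input_dev *dev, int slot,
				   int x, int y, bool active, bool is_palm)
	{
		input_mt_slot(dev, slot);
		input_mt_report_slot_state(dev,
				is_palm ? MT_TOOL_PALM : MT_TOOL_FINGER,
				active);
		if (active) {
			input_report_abs(dev, ABS_MT_POSITION_X, x);
			input_report_abs(dev, ABS_MT_POSITION_Y, y);
		}
	}
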
+ 14 - 17
MAINTAINERS

@@ -637,8 +637,7 @@ F:      drivers/gpu/drm/radeon/radeon_kfd.h
 F:      include/uapi/linux/kfd_ioctl.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:	Andreas Herrmann <herrmann.der.user@googlemail.com>
-L:	amd64-microcode@amd64.org
+M:	Borislav Petkov <bp@alien8.de>
 S:	Maintained
 F:	arch/x86/kernel/cpu/microcode/amd*
 
@@ -3397,7 +3396,6 @@ T:	git git://people.freedesktop.org/~airlied/linux
 S:	Supported
 F:	drivers/gpu/drm/rcar-du/
 F:	drivers/gpu/drm/shmobile/
-F:	include/linux/platform_data/rcar-du.h
 F:	include/linux/platform_data/shmob_drm.h
 
 DSBR100 USB FM RADIO DRIVER
@@ -5095,7 +5093,7 @@ S:	Supported
 F:	drivers/platform/x86/intel_menlow.c
 
 INTEL IA32 MICROCODE UPDATE SUPPORT
-M:	Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M:	Borislav Petkov <bp@alien8.de>
 S:	Maintained
 F:	arch/x86/kernel/cpu/microcode/core*
 F:	arch/x86/kernel/cpu/microcode/intel*
@@ -5136,22 +5134,21 @@ M:	Deepak Saxena <dsaxena@plexity.net>
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
-M:	Bruce Allan <bruce.w.allan@intel.com>
-M:	Carolyn Wyborny <carolyn.wyborny@intel.com>
-M:	Don Skidmore <donald.c.skidmore@intel.com>
-M:	Greg Rose <gregory.v.rose@intel.com>
-M:	Matthew Vick <matthew.vick@intel.com>
-M:	John Ronciak <john.ronciak@intel.com>
-M:	Mitch Williams <mitch.a.williams@intel.com>
-M:	Linux NICS <linux.nics@intel.com>
-L:	e1000-devel@lists.sourceforge.net
+R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
+R:	Shannon Nelson <shannon.nelson@intel.com>
+R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
+R:	Don Skidmore <donald.c.skidmore@intel.com>
+R:	Matthew Vick <matthew.vick@intel.com>
+R:	John Ronciak <john.ronciak@intel.com>
+R:	Mitch Williams <mitch.a.williams@intel.com>
+L:	intel-wired-lan@lists.osuosl.org
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q:	http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S:	Supported
 F:	Documentation/networking/e100.txt
 F:	Documentation/networking/e1000.txt

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*

+ 1 - 1
arch/powerpc/include/asm/cputhreads.h

@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
 
 static inline int cpu_nr_cores(void)
 {
-	return NR_CPUS >> threads_shift;
+	return nr_cpu_ids >> threads_shift;
 }
 
 static inline cpumask_t cpu_online_cores_map(void)

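The one-word change above is substantive: NR_CPUS is the compile-time maximum, while nr_cpu_ids reflects the CPUs actually possible on the running system. A worked example with illustrative numbers:

	/*
	 * Kernel built with NR_CPUS=2048, booted on a box with 8 hardware
	 * threads at 8 threads per core (threads_shift == 3):
	 *
	 *   before: NR_CPUS    >> threads_shift = 2048 >> 3 = 256 "cores"
	 *   after:  nr_cpu_ids >> threads_shift =    8 >> 3 =   1 core
	 */
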
+ 5 - 5
arch/x86/kernel/cpu/perf_event_intel.c

@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 	EVENT_CONSTRAINT_END
 };
 
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 	if (c)
 		return c;
 
-	c = intel_pebs_constraints(event);
+	c = intel_shared_regs_constraints(cpuc, event);
 	if (c)
 		return c;
 
-	c = intel_shared_regs_constraints(cpuc, event);
+	c = intel_pebs_constraints(event);
 	if (c)
 		return c;
 

+ 1 - 0
arch/x86/kernel/early-quirks.c

@@ -546,6 +546,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
 	INTEL_BDW_D_IDS(&gen8_stolen_funcs),
 	INTEL_CHV_IDS(&chv_stolen_funcs),
 	INTEL_SKL_IDS(&gen9_stolen_funcs),
+	INTEL_BXT_IDS(&gen9_stolen_funcs),
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)

+ 15 - 1
arch/x86/kernel/entry_64.S

@@ -799,7 +799,21 @@ retint_swapgs:		/* return to user-space */
 	cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp)	/* R11 == RFLAGS */
 	jne opportunistic_sysret_failed
 
-	testq $X86_EFLAGS_RF,%r11		/* sysret can't restore RF */
+	/*
+	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
+	 * restoring TF results in a trap from userspace immediately after
+	 * SYSRET.  This would cause an infinite loop whenever #DB happens
+	 * with register state that satisfies the opportunistic SYSRET
+	 * conditions.  For example, single-stepping this user code:
+	 *
+	 *           movq $stuck_here,%rcx
+	 *           pushfq
+	 *           popq %r11
+	 *   stuck_here:
+	 *
+	 * would never get past 'stuck_here'.
+	 */
+	testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
 	jnz opportunistic_sysret_failed
 
 	/* nothing to check for RSP */

+ 1 - 1
arch/x86/kernel/kgdb.c

@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 	{ "bx", 8, offsetof(struct pt_regs, bx) },
 	{ "cx", 8, offsetof(struct pt_regs, cx) },
 	{ "dx", 8, offsetof(struct pt_regs, dx) },
-	{ "si", 8, offsetof(struct pt_regs, dx) },
+	{ "si", 8, offsetof(struct pt_regs, si) },
 	{ "di", 8, offsetof(struct pt_regs, di) },
 	{ "bp", 8, offsetof(struct pt_regs, bp) },
 	{ "sp", 8, offsetof(struct pt_regs, sp) },

+ 10 - 0
arch/x86/kernel/reboot.c

@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 		},
 	},
 
+	/* ASRock */
+	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
+		.callback = set_pci_reboot,
+		.ident = "ASRock Q1900DC-ITX",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
+			DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
+		},
+	},
+
 	/* ASUS */
 	{	/* Handle problems with rebooting on ASUS P4S800 */
 		.callback = set_bios_reboot,

+ 9 - 1
arch/x86/xen/p2m.c

@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#else
+#define P2M_LIMIT 0
+#endif
+
 static DEFINE_SPINLOCK(p2m_update_lock);
 
 static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
 void __init xen_vmalloc_p2m_tree(void)
 {
 	static struct vm_struct vm;
+	unsigned long p2m_limit;
 
+	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
 	vm.flags = VM_ALLOC;
-	vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
+	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
 			PMD_SIZE * PMDS_PER_MID_PAGE);
 	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
 	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

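The unit conversion in p2m_limit is worth spelling out: the Kconfig value is in GiB, and the (phys_addr_t) cast keeps the multiplication from overflowing on 32-bit. With illustrative numbers:

	/*
	 * Assuming CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512 and
	 * PAGE_SIZE=4096 (illustrative values only):
	 *
	 *   p2m_limit = 512 * 1024^3 / 4096 = 134217728 PFNs (2^27)
	 *
	 * so the p2m virtual area is sized for max(xen_max_p2m_pfn, 2^27)
	 * entries, leaving room for memory hotplugged via ballooning
	 * rather than only what was present at boot.
	 */
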
+ 3 - 3
block/blk-settings.c

@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 				     b->physical_block_size);
 
 	t->io_min = max(t->io_min, b->io_min);
-	t->io_opt = lcm(t->io_opt, b->io_opt);
+	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
 	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		    b->raid_partial_stripes_expensive);
 
 	/* Find lowest common alignment_offset */
-	t->alignment_offset = lcm(t->alignment_offset, alignment)
+	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
 		% max(t->physical_block_size, t->io_min);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 						      b->max_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
-		t->discard_alignment = lcm(t->discard_alignment, alignment) %
+		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
 			t->discard_granularity;
 	}
 

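The motivation for lcm_not_zero(): io_opt, alignment and discard_alignment are frequently zero on one side of a stacked device, and plain lcm(0, x) yields 0, silently discarding the other side's limit. A minimal sketch of the intended semantics — the real helper lives in include/linux/lcm.h, so treat this body as an assumption:

	static inline unsigned long lcm_not_zero(unsigned long a,
						 unsigned long b)
	{
		unsigned long l = lcm(a, b);	/* kernel lcm() is 0 if a or b is 0 */

		if (l)
			return l;
		return a ? a : b;	/* keep whichever value is non-zero */
	}
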
+ 13 - 2
drivers/ata/libata-core.c

@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
 	/* devices that don't properly handle queued TRIM commands */
-	{ "Micron_M[56]*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung SSD 850 PRO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
-	{ "Crucial_CT*SSD*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
 	/*
 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 */
 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
 
+	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },

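For context on the entries above: the model and firmware-revision columns are shell-style glob patterns (matched via glob_match() in kernels of this era), and a non-NULL revision restricts the horkage to that firmware. Illustrative pairings, hedged:

	/*
	 * "Micron_M5[15]0*", rev "MU01"  -> Micron_M550*/M510* drives,
	 *                                   firmware MU01 only
	 * "Crucial_CT*M500*", rev NULL   -> any firmware revision
	 */
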
+ 1 - 0
drivers/dma/bcm2835-dma.c

@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		bcm2835_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		bcm2835_dma_abort(c->chan_base);
 

+ 7 - 0
drivers/dma/dma-jz4740.c

@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
 }
 
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int jz4740_dma_probe(struct platform_device *pdev)
 {
 	struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
 	dd->device_config = jz4740_dma_slave_config;
 	dd->device_terminate_all = jz4740_dma_terminate_all;
+	dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
 

+ 7 - 0
drivers/dma/edma.c

@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
 	 */
 	if (echan->edesc) {
 		int cyclic = echan->edesc->cyclic;
+
+		/*
+		 * free the running request descriptor
+		 * since it is not in any of the vdesc lists
+		 */
+		edma_desc_free(&echan->edesc->vdesc);
+
 		echan->edesc = NULL;
 		edma_stop(echan->ch_num);
 		/* Move the cyclic channel back to default queue */

+ 3 - 1
drivers/dma/moxart-dma.c

@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
 
 	spin_lock_irqsave(&ch->vc.lock, flags);
 
-	if (ch->desc)
+	if (ch->desc) {
+		moxart_dma_desc_free(&ch->desc->vd);
 		ch->desc = NULL;
+	}
 
 	ctrl = readl(ch->base + REG_OFF_CTRL);
 	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);

+ 1 - 0
drivers/dma/omap-dma.c

@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		omap_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		/* Avoid stopping the dma twice */
 		if (!c->paused)

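The bcm2835, edma, moxart and omap hunks in this series all plug the same leak: the descriptor currently executing belongs to the channel, not to any of the virt-dma lists, so terminate_all() must free it explicitly before clearing the pointer. A generic sketch of the pattern — the foo_* names are stand-ins, not a real driver:

	static int foo_dma_terminate_all(struct dma_chan *chan)
	{
		struct foo_dma_chan *c = to_foo_dma_chan(chan);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->desc) {
			/* running desc is on no vdesc list: free it here */
			foo_dma_desc_free(&c->desc->vd);
			c->desc = NULL;
		}
		vchan_get_all_descriptors(&c->vc, &head);
		spin_unlock_irqrestore(&c->vc.lock, flags);
		vchan_dma_desc_free_list(&c->vc, &head);

		return 0;
	}
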
+ 7 - 15
drivers/firmware/dmi_scan.c

@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
 	int i = 0;
 
 	/*
-	 *	Stop when we see all the items the table claimed to have
-	 *	OR we run off the end of the table (also happens)
+	 * Stop when we have seen all the items the table claimed to have
+	 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+	 * off the end of the table (should never happen but sometimes does
+	 * on bogus implementations.)
 	 */
-	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
+	while ((!num || i < num) &&
+	       (data - buf + sizeof(struct dmi_header)) <= len) {
 		const struct dmi_header *dm = (const struct dmi_header *)data;
 
 		/*
@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
 	if (memcmp(buf, "_SM3_", 5) == 0 &&
 	    buf[6] < 32 && dmi_checksum(buf, buf[6])) {
 		dmi_ver = get_unaligned_be16(buf + 7);
+		dmi_num = 0;			/* No longer specified */
 		dmi_len = get_unaligned_le32(buf + 12);
 		dmi_base = get_unaligned_le64(buf + 16);
 
-		/*
-		 * The 64-bit SMBIOS 3.0 entry point no longer has a field
-		 * containing the number of structures present in the table.
-		 * Instead, it defines the table size as a maximum size, and
-		 * relies on the end-of-table structure type (#127) to be used
-		 * to signal the end of the table.
-		 * So let's define dmi_num as an upper bound as well: each
-		 * structure has a 4 byte header, so dmi_len / 4 is an upper
-		 * bound for the number of structures in the table.
-		 */
-		dmi_num = dmi_len / 4;
-
 		if (dmi_walk_early(dmi_decode) == 0) {
 			pr_info("SMBIOS %d.%d present.\n",
 				dmi_ver >> 8, dmi_ver & 0xFF);

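Net effect of the two hunks, summarized as a hedged sketch (the type-127 check itself sits in loop-body code outside this hunk, per the new comment):

	/*
	 * Resulting scan behaviour:
	 *
	 *   num == 0  (SMBIOS >= 3.0 entry point)
	 *       scan is bounded only by 'len' and by the end-of-table
	 *       record (type 127) checked inside the loop body
	 *   num  > 0  (legacy entry points)
	 *       stop after 'num' structures, still bounded by 'len'
	 */
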
+ 1 - 1
drivers/gpio/gpio-mpc8xxx.c

@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+static struct of_device_id mpc8xxx_gpio_ids[] = {
 	{ .compatible = "fsl,mpc8349-gpio", },
 	{ .compatible = "fsl,mpc8572-gpio", },
 	{ .compatible = "fsl,mpc8610-gpio", },

+ 1 - 1
drivers/gpio/gpio-syscon.c

@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
 		ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
 						 &priv->dir_reg_offset);
 		if (ret)
-			dev_err(dev, "can't read the dir register offset!\n");
+			dev_dbg(dev, "can't read the dir register offset!\n");
 
 		priv->dir_reg_offset <<= 3;
 	}

+ 10 - 0
drivers/gpio/gpiolib-acpi.c

@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	if (!handler)
 		return AE_BAD_PARAMETER;
 
+	pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+	if (pin < 0)
+		return AE_BAD_PARAMETER;
+
 	desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
 	if (IS_ERR(desc)) {
 		dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 		struct gpio_desc *desc;
 		bool found;
 
+		pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+		if (pin < 0) {
+			status = AE_BAD_PARAMETER;
+			goto out;
+		}
+
 		mutex_lock(&achip->conn_lock);
 
 		found = false;

+ 9 - 0
drivers/gpu/drm/Kconfig

@@ -165,6 +165,15 @@ config DRM_SAVAGE
 	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
 	  chipset. If M is selected the module will be called savage.
 
+config DRM_VGEM
+	tristate "Virtual GEM provider"
+	depends on DRM
+	help
+	  Choose this option to get a virtual graphics memory manager,
+	  as used by Mesa's software renderer for enhanced performance.
+	  If M is selected the module will be called vgem.
+
+
 source "drivers/gpu/drm/exynos/Kconfig"
 
 source "drivers/gpu/drm/rockchip/Kconfig"

+ 1 - 0
drivers/gpu/drm/Makefile

@@ -48,6 +48,7 @@ obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)	+=via/
+obj-$(CONFIG_DRM_VGEM)	+= vgem/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/

+ 7 - 6
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c

@@ -435,21 +435,22 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 {
 	struct kfd_ioctl_get_clock_counters_args *args = data;
 	struct kfd_dev *dev;
-	struct timespec time;
+	struct timespec64 time;
 
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL)
 		return -EINVAL;
 
 	/* Reading GPU clock counter from KGD */
-	args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+	args->gpu_clock_counter =
+		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
 
 	/* No access to rdtsc. Using raw monotonic time */
-	getrawmonotonic(&time);
-	args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+	getrawmonotonic64(&time);
+	args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
 
-	get_monotonic_boottime(&time);
-	args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
+	get_monotonic_boottime64(&time);
+	args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
 
 	/* Since the counter is in nano-seconds we use 1GHz frequency */
 	args->system_clock_freq = 1000000000;

+ 12 - 5
drivers/gpu/drm/amd/amdkfd/kfd_device.c

@@ -94,7 +94,8 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
 	return NULL;
 }
 
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
 {
 	struct kfd_dev *kfd;
 
@@ -112,6 +113,11 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
 	kfd->device_info = device_info;
 	kfd->pdev = pdev;
 	kfd->init_complete = false;
+	kfd->kfd2kgd = f2g;
+
+	mutex_init(&kfd->doorbell_mutex);
+	memset(&kfd->doorbell_available_index, 0,
+		sizeof(kfd->doorbell_available_index));
 
 	return kfd;
 }
@@ -200,8 +206,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	/* add another 512KB for all other allocations on gart (HPD, fences) */
 	size += 512 * 1024;
 
-	if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
-			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
+	if (kfd->kfd2kgd->init_gtt_mem_allocation(
+			kfd->kgd, size, &kfd->gtt_mem,
+			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
 		dev_err(kfd_device,
 			"Could not allocate %d bytes for device (%x:%x)\n",
 			size, kfd->pdev->vendor, kfd->pdev->device);
@@ -270,7 +277,7 @@ device_iommu_pasid_error:
 kfd_topology_add_device_error:
 	kfd_gtt_sa_fini(kfd);
 kfd_gtt_sa_init_error:
-	kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
 		"device (%x:%x) NOT added due to errors\n",
 		kfd->pdev->vendor, kfd->pdev->device);
@@ -285,7 +292,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 		amd_iommu_free_device(kfd->pdev);
 		kfd_topology_remove_device(kfd);
 		kfd_gtt_sa_fini(kfd);
-		kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	}
 
 	kfree(kfd);

+ 11 - 7
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

@@ -82,7 +82,8 @@ static inline unsigned int get_pipes_num_cpsch(void)
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
-	return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
+	return dqm->dev->kfd2kgd->program_sh_mem_settings(
+						dqm->dev->kgd, qpd->vmid,
 						qpd->sh_mem_config,
 						qpd->sh_mem_ape1_base,
 						qpd->sh_mem_ape1_limit,
@@ -457,9 +458,12 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
 {
 	uint32_t pasid_mapping;
 
-	pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
-						ATC_VMID_PASID_MAPPING_VALID;
-	return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
+	pasid_mapping = (pasid == 0) ? 0 :
+		(uint32_t)pasid |
+		ATC_VMID_PASID_MAPPING_VALID;
+
+	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
+						dqm->dev->kgd, pasid_mapping,
 						vmid);
 }
 
@@ -511,7 +515,7 @@ int init_pipelines(struct device_queue_manager *dqm,
 		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
 		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
 		/* = log2(bytes/4)-1 */
-		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
+		dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
 				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
 	}
 
@@ -905,7 +909,7 @@ out:
 	return retval;
 }
 
-static int fence_wait_timeout(unsigned int *fence_addr,
+static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 				unsigned int fence_value,
 				unsigned long timeout)
 {
@@ -961,7 +965,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
 				KFD_FENCE_COMPLETED);
 	/* should be timed out */
-	fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
+	amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
 	pm_release_ib(&dqm->packets);
 	dqm->active_runlist = false;

+ 7 - 10
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c

@@ -32,9 +32,6 @@
  * and that's assures that any user process won't get access to the
  * kernel doorbells page
  */
-static DEFINE_MUTEX(doorbell_mutex);
-static unsigned long doorbell_available_index[
-	DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
 
 #define KERNEL_DOORBELL_PASID 1
 #define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
@@ -170,12 +167,12 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 
 	BUG_ON(!kfd || !doorbell_off);
 
-	mutex_lock(&doorbell_mutex);
-	inx = find_first_zero_bit(doorbell_available_index,
+	mutex_lock(&kfd->doorbell_mutex);
+	inx = find_first_zero_bit(kfd->doorbell_available_index,
 					KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
-	__set_bit(inx, doorbell_available_index);
-	mutex_unlock(&doorbell_mutex);
+	__set_bit(inx, kfd->doorbell_available_index);
+	mutex_unlock(&kfd->doorbell_mutex);
 
 	if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 		return NULL;
@@ -203,9 +200,9 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
 
 	inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
 
-	mutex_lock(&doorbell_mutex);
-	__clear_bit(inx, doorbell_available_index);
-	mutex_unlock(&doorbell_mutex);
+	mutex_lock(&kfd->doorbell_mutex);
+	__clear_bit(inx, kfd->doorbell_available_index);
+	mutex_unlock(&kfd->doorbell_mutex);
 }
 
 inline void write_kernel_doorbell(u32 __iomem *db, u32 value)

+ 1 - 11
drivers/gpu/drm/amd/amdkfd/kfd_module.c

@@ -34,7 +34,6 @@
 #define KFD_DRIVER_MINOR	7
 #define KFD_DRIVER_PATCHLEVEL	1
 
-const struct kfd2kgd_calls *kfd2kgd;
 static const struct kgd2kfd_calls kgd2kfd = {
 	.exit		= kgd2kfd_exit,
 	.probe		= kgd2kfd_probe,
@@ -55,9 +54,7 @@ module_param(max_num_of_queues_per_device, int, 0444);
 MODULE_PARM_DESC(max_num_of_queues_per_device,
 	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
-bool kgd2kfd_init(unsigned interface_version,
-		  const struct kfd2kgd_calls *f2g,
-		  const struct kgd2kfd_calls **g2f)
+bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
 {
 	/*
 	 * Only one interface version is supported,
@@ -66,11 +63,6 @@ bool kgd2kfd_init(unsigned interface_version,
 	if (interface_version != KFD_INTERFACE_VERSION)
 		return false;
 
-	/* Protection against multiple amd kgd loads */
-	if (kfd2kgd)
-		return true;
-
-	kfd2kgd = f2g;
 	*g2f = &kgd2kfd;
 
 	return true;
@@ -85,8 +77,6 @@ static int __init kfd_module_init(void)
 {
 	int err;
 
-	kfd2kgd = NULL;
-
 	/* Verify module parameters */
 	if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
 		(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {

+ 7 - 6
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c

@@ -151,14 +151,15 @@ static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
 			uint32_t queue_id, uint32_t __user *wptr)
 {
-	return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+	return mm->dev->kfd2kgd->hqd_load
+		(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
 }
 
 static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t __user *wptr)
 {
-	return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
 }
 
 static int update_mqd(struct mqd_manager *mm, void *mqd,
@@ -245,7 +246,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
 			unsigned int timeout, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
 					pipe_id, queue_id);
 }
 
@@ -258,7 +259,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
 				unsigned int timeout, uint32_t pipe_id,
 				uint32_t queue_id)
 {
-	return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
 }
 
 static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -266,7 +267,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
 			uint32_t queue_id)
 {
 
-	return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
+	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
 					pipe_id, queue_id);
 
 }
@@ -275,7 +276,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
 			uint64_t queue_address,	uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
 }
 
 /*

+ 8 - 6
drivers/gpu/drm/amd/amdkfd/kfd_priv.h

@@ -148,6 +148,11 @@ struct kfd_dev {
 
 	struct kgd2kfd_shared_resources shared_resources;
 
+	const struct kfd2kgd_calls *kfd2kgd;
+	struct mutex doorbell_mutex;
+	unsigned long doorbell_available_index[DIV_ROUND_UP(
+		KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
+
 	void *gtt_mem;
 	uint64_t gtt_start_gpu_addr;
 	void *gtt_start_cpu_ptr;
@@ -164,13 +169,12 @@ struct kfd_dev {
 
 /* KGD2KFD callbacks */
 void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 const struct kgd2kfd_shared_resources *gpu_resources);
+			const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 
-extern const struct kfd2kgd_calls *kfd2kgd;
-
 enum kfd_mempool {
 	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
 	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -378,8 +382,6 @@ struct qcm_process_device {
 	/* The Device Queue Manager that owns this data */
 	struct device_queue_manager *dqm;
 	struct process_queue_manager *pqm;
-	/* Device Queue Manager lock */
-	struct mutex *lock;
 	/* Queues list */
 	struct list_head queues_list;
 	struct list_head priv_queue_list;

+ 6 - 0
drivers/gpu/drm/amd/amdkfd/kfd_process.c

@@ -162,10 +162,16 @@ static void kfd_process_wq_release(struct work_struct *work)
 
 	p = my_work->p;
 
+	pr_debug("Releasing process (pasid %d) in workqueue\n",
+			p->pasid);
+
 	mutex_lock(&p->mutex);
 
 	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
 							per_device_list) {
+		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
+				pdd->dev->id, p->pasid);
+
 		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
 		list_del(&pdd->per_device_list);
 

+ 7 - 5
drivers/gpu/drm/amd/amdkfd/kfd_topology.c

@@ -726,13 +726,14 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 		}
 
 		sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
-				kfd2kgd->get_max_engine_clock_in_mhz(
+			dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
 					dev->gpu->kgd));
 		sysfs_show_64bit_prop(buffer, "local_mem_size",
-				kfd2kgd->get_vmem_size(dev->gpu->kgd));
+			dev->gpu->kfd2kgd->get_vmem_size(
+					dev->gpu->kgd));
 
 		sysfs_show_32bit_prop(buffer, "fw_version",
-				kfd2kgd->get_fw_version(
+			dev->gpu->kfd2kgd->get_fw_version(
 						dev->gpu->kgd,
 						KGD_ENGINE_MEC1));
 	}
@@ -1099,8 +1100,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
 	buf[2] = gpu->pdev->subsystem_device;
 	buf[3] = gpu->pdev->device;
 	buf[4] = gpu->pdev->bus->number;
-	buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
-	buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+	buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
+			& 0xffffffff);
+	buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
 
 	for (i = 0, hashout = 0; i < 7; i++)
 		hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);

+ 32 - 32
drivers/gpu/drm/amd/include/kgd_kfd_interface.h

@@ -76,37 +76,6 @@ struct kgd2kfd_shared_resources {
 	size_t doorbell_start_offset;
 };
 
-/**
- * struct kgd2kfd_calls
- *
- * @exit: Notifies amdkfd that kgd module is unloaded
- *
- * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
- *
- * @device_init: Initialize the newly probed device (if it is a device that
- * amdkfd supports)
- *
- * @device_exit: Notifies amdkfd about a removal of a kgd device
- *
- * @suspend: Notifies amdkfd about a suspend action done to a kgd device
- *
- * @resume: Notifies amdkfd about a resume action done to a kgd device
- *
- * This structure contains function callback pointers so the kgd driver
- * will notify to the amdkfd about certain status changes.
- *
- */
-struct kgd2kfd_calls {
-	void (*exit)(void);
-	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
-	bool (*device_init)(struct kfd_dev *kfd,
-			const struct kgd2kfd_shared_resources *gpu_resources);
-	void (*device_exit)(struct kfd_dev *kfd);
-	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
-	void (*suspend)(struct kfd_dev *kfd);
-	int (*resume)(struct kfd_dev *kfd);
-};
-
 /**
  * struct kfd2kgd_calls
  *
@@ -196,8 +165,39 @@ struct kfd2kgd_calls {
 				enum kgd_engine_type type);
 };
 
+/**
+ * struct kgd2kfd_calls
+ *
+ * @exit: Notifies amdkfd that kgd module is unloaded
+ *
+ * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
+ *
+ * @device_init: Initialize the newly probed device (if it is a device that
+ * amdkfd supports)
+ *
+ * @device_exit: Notifies amdkfd about a removal of a kgd device
+ *
+ * @suspend: Notifies amdkfd about a suspend action done to a kgd device
+ *
+ * @resume: Notifies amdkfd about a resume action done to a kgd device
+ *
+ * This structure contains function callback pointers so the kgd driver
+ * will notify to the amdkfd about certain status changes.
+ *
+ */
+struct kgd2kfd_calls {
+	void (*exit)(void);
+	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
+		const struct kfd2kgd_calls *f2g);
+	bool (*device_init)(struct kfd_dev *kfd,
+			const struct kgd2kfd_shared_resources *gpu_resources);
+	void (*device_exit)(struct kfd_dev *kfd);
+	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
+	void (*suspend)(struct kfd_dev *kfd);
+	int (*resume)(struct kfd_dev *kfd);
+};
+
 bool kgd2kfd_init(unsigned interface_version,
-		const struct kfd2kgd_calls *f2g,
 		const struct kgd2kfd_calls **g2f);
 
 #endif	/* KGD_KFD_INTERFACE_H_INCLUDED */

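Taken together, the amdkfd hunks replace the single module-global kfd2kgd pointer with a per-device function table handed over at probe time, which presumably also drops the "only one kgd driver may load" restriction removed from kfd_module.c. The shape of the new ownership model, reduced to a sketch (field names as in kfd_priv.h above; the allocation details are illustrative):

	struct kfd_dev {
		struct kgd_dev *kgd;
		const struct kfd2kgd_calls *kfd2kgd;	/* was a global */
		/* ... */
	};

	struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
				      const struct kfd2kgd_calls *f2g)
	{
		struct kfd_dev *kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);

		if (!kfd)
			return NULL;
		kfd->kgd = kgd;
		kfd->kfd2kgd = f2g;	/* all later calls go through this */
		return kfd;
	}
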
+ 21 - 0
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c

@@ -207,6 +207,27 @@ static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
 	crtc->enabled = true;
 }
 
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *c)
+{
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+	if (crtc->enabled) {
+		atmel_hlcdc_crtc_disable(c);
+		/* save enable state for resume */
+		crtc->enabled = true;
+	}
+}
+
+void atmel_hlcdc_crtc_resume(struct drm_crtc *c)
+{
+	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
+	if (crtc->enabled) {
+		crtc->enabled = false;
+		atmel_hlcdc_crtc_enable(c);
+	}
+}
+
 static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 					 struct drm_crtc_state *s)
 {

+ 4 - 15
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c

@@ -569,14 +569,8 @@ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 		return 0;
 
 	drm_modeset_lock_all(drm_dev);
-	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-		if (crtc->enabled) {
-			crtc_funcs->disable(crtc);
-			/* save enable state for resume */
-			crtc->enabled = true;
-		}
-	}
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+		atmel_hlcdc_crtc_suspend(crtc);
 	drm_modeset_unlock_all(drm_dev);
 	return 0;
 }
@@ -590,13 +584,8 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
 		return 0;
 
 	drm_modeset_lock_all(drm_dev);
-	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-		if (crtc->enabled) {
-			crtc->enabled = false;
-			crtc_funcs->enable(crtc);
-		}
-	}
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head)
+		atmel_hlcdc_crtc_resume(crtc);
 	drm_modeset_unlock_all(drm_dev);
 	return 0;
 }

+ 3 - 0
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h

@@ -155,6 +155,9 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
 void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
 				       struct drm_file *file);
 
+void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
+void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
+
 int atmel_hlcdc_crtc_create(struct drm_device *dev);
 
 int atmel_hlcdc_create_outputs(struct drm_device *dev);

+ 1 - 0
drivers/gpu/drm/bochs/bochs_hw.c

@@ -164,6 +164,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
 
 	bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
 
+	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,      0);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP,         bochs->bpp);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES,        bochs->xres);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES,        bochs->yres);

+ 11 - 0
drivers/gpu/drm/bridge/Kconfig

@@ -11,3 +11,14 @@ config DRM_PTN3460
 	select DRM_PANEL
 	---help---
 	  ptn3460 eDP-LVDS bridge chip driver.
+
+config DRM_PS8622
+	tristate "Parade eDP/LVDS bridge"
+	depends on DRM
+	depends on OF
+	select DRM_PANEL
+	select DRM_KMS_HELPER
+	select BACKLIGHT_LCD_SUPPORT
+	select BACKLIGHT_CLASS_DEVICE
+	---help---
+	  parade eDP-LVDS bridge chip driver.

+ 1 - 0
drivers/gpu/drm/bridge/Makefile

@@ -1,4 +1,5 @@
 ccflags-y := -Iinclude/drm
 
+obj-$(CONFIG_DRM_PS8622) += ps8622.o
 obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o

+ 684 - 0
drivers/gpu/drm/bridge/ps8622.c

@@ -0,0 +1,684 @@
+/*
+ * Parade PS8622 eDP/LVDS bridge driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_panel.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/* Brightness scale on the Parade chip */
+#define PS8622_MAX_BRIGHTNESS 0xff
+
+/* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */
+#define PS8622_POWER_RISE_T1_MIN_US 10
+#define PS8622_POWER_RISE_T1_MAX_US 10000
+#define PS8622_RST_HIGH_T2_MIN_US 3000
+#define PS8622_RST_HIGH_T2_MAX_US 30000
+#define PS8622_PWMO_END_T12_MS 200
+#define PS8622_POWER_FALL_T16_MAX_US 10000
+#define PS8622_POWER_OFF_T17_MS 500
+
+#if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \
+	(PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US))
+#error "T2.min + T1.max must be less than T2.max + T1.min"
+#endif
+
+struct ps8622_bridge {
+	struct drm_connector connector;
+	struct i2c_client *client;
+	struct drm_bridge bridge;
+	struct drm_panel *panel;
+	struct regulator *v12;
+	struct backlight_device *bl;
+
+	struct gpio_desc *gpio_slp;
+	struct gpio_desc *gpio_rst;
+
+	u32 max_lane_count;
+	u32 lane_count;
+
+	bool enabled;
+};
+
+static inline struct ps8622_bridge *
+		bridge_to_ps8622(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct ps8622_bridge, bridge);
+}
+
+static inline struct ps8622_bridge *
+		connector_to_ps8622(struct drm_connector *connector)
+{
+	return container_of(connector, struct ps8622_bridge, connector);
+}
+
+static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val)
+{
+	int ret;
+	struct i2c_adapter *adap = client->adapter;
+	struct i2c_msg msg;
+	u8 data[] = {reg, val};
+
+	msg.addr = client->addr + page;
+	msg.flags = 0;
+	msg.len = sizeof(data);
+	msg.buf = data;
+
+	ret = i2c_transfer(adap, &msg, 1);
+	if (ret != 1)
+		pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n",
+			client->addr + page, reg, val, ret);
+	return !(ret == 1);
+}
+
+static int ps8622_send_config(struct ps8622_bridge *ps8622)
+{
+	struct i2c_client *cl = ps8622->client;
+	int err = 0;
+
+	/* HPD low */
+	err = ps8622_set(cl, 0x02, 0xa1, 0x01);
+	if (err)
+		goto error;
+
+	/* SW setting: [1:0] SW output 1.2V voltage is lower to 96% */
+	err = ps8622_set(cl, 0x04, 0x14, 0x01);
+	if (err)
+		goto error;
+
+	/* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */
+	err = ps8622_set(cl, 0x04, 0xe3, 0x20);
+	if (err)
+		goto error;
+
+	/* [7] RCO SS enable */
+	err = ps8622_set(cl, 0x04, 0xe2, 0x80);
+	if (err)
+		goto error;
+
+	/* RPHY Setting
+	 * [3:2] CDR tune wait cycle before measure for fine tune
+	 * b00: 1us b01: 0.5us b10:2us, b11: 4us
+	 */
+	err = ps8622_set(cl, 0x04, 0x8a, 0x0c);
+	if (err)
+		goto error;
+
+	/* [3] RFD always on */
+	err = ps8622_set(cl, 0x04, 0x89, 0x08);
+	if (err)
+		goto error;
+
+	/* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */
+	err = ps8622_set(cl, 0x04, 0x71, 0x2d);
+	if (err)
+		goto error;
+
+	/* 2.7G CDR settings: NOF=40LSB for HBR CDR  setting */
+	err = ps8622_set(cl, 0x04, 0x7d, 0x07);
+	if (err)
+		goto error;
+
+	/* [1:0] Fmin=+4bands */
+	err = ps8622_set(cl, 0x04, 0x7b, 0x00);
+	if (err)
+		goto error;
+
+	/* [7:5] DCO_FTRNG=+-40% */
+	err = ps8622_set(cl, 0x04, 0x7a, 0xfd);
+	if (err)
+		goto error;
+
+	/* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */
+	err = ps8622_set(cl, 0x04, 0xc0, 0x12);
+	if (err)
+		goto error;
+
+	/* Gitune=-37% */
+	err = ps8622_set(cl, 0x04, 0xc1, 0x92);
+	if (err)
+		goto error;
+
+	/* Fbstep=100% */
+	err = ps8622_set(cl, 0x04, 0xc2, 0x1c);
+	if (err)
+		goto error;
+
+	/* [7] LOS signal disable */
+	err = ps8622_set(cl, 0x04, 0x32, 0x80);
+	if (err)
+		goto error;
+
+	/* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */
+	err = ps8622_set(cl, 0x04, 0x00, 0xb0);
+	if (err)
+		goto error;
+
+	/* [7:6] Right-bar GPIO output strength is 8mA */
+	err = ps8622_set(cl, 0x04, 0x15, 0x40);
+	if (err)
+		goto error;
+
+	/* EQ Training State Machine Setting, RCO calibration start */
+	err = ps8622_set(cl, 0x04, 0x54, 0x10);
+	if (err)
+		goto error;
+
+	/* Logic, needs more than 10 I2C command */
+	/* [4:0] MAX_LANE_COUNT set to max supported lanes */
+	err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count);
+	if (err)
+		goto error;
+
+	/* [4:0] LANE_COUNT_SET set to chosen lane count */
+	err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count);
+	if (err)
+		goto error;
+
+	err = ps8622_set(cl, 0x00, 0x52, 0x20);
+	if (err)
+		goto error;
+
+	/* HPD CP toggle enable */
+	err = ps8622_set(cl, 0x00, 0xf1, 0x03);
+	if (err)
+		goto error;
+
+	err = ps8622_set(cl, 0x00, 0x62, 0x41);
+	if (err)
+		goto error;
+
+	/* Counter number, add 1ms counter delay */
+	err = ps8622_set(cl, 0x00, 0xf6, 0x01);
+	if (err)
+		goto error;
+
+	/* [6]PWM function control by DPCD0040f[7], default is PWM block */
+	err = ps8622_set(cl, 0x00, 0x77, 0x06);
+	if (err)
+		goto error;
+
+	/* 04h Adjust VTotal toleranceto fix the 30Hz no display issue */
+	err = ps8622_set(cl, 0x00, 0x4c, 0x04);
+	if (err)
+		goto error;
+
+	/* DPCD00400='h00, Parade OUI ='h001cf8 */
+	err = ps8622_set(cl, 0x01, 0xc0, 0x00);
+	if (err)
+		goto error;
+
+	/* DPCD00401='h1c */
+	err = ps8622_set(cl, 0x01, 0xc1, 0x1c);
+	if (err)
+		goto error;
+
+	/* DPCD00402='hf8 */
+	err = ps8622_set(cl, 0x01, 0xc2, 0xf8);
+	if (err)
+		goto error;
+
+	/* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */
+	err = ps8622_set(cl, 0x01, 0xc3, 0x44);
+	if (err)
+		goto error;
+
+	/* DPCD404 */
+	err = ps8622_set(cl, 0x01, 0xc4, 0x32);
+	if (err)
+		goto error;
+
+	/* DPCD405 */
+	err = ps8622_set(cl, 0x01, 0xc5, 0x53);
+	if (err)
+		goto error;
+
+	/* DPCD406 */
+	err = ps8622_set(cl, 0x01, 0xc6, 0x4c);
+	if (err)
+		goto error;
+
+	/* DPCD407 */
+	err = ps8622_set(cl, 0x01, 0xc7, 0x56);
+	if (err)
+		goto error;
+
+	/* DPCD408 */
+	err = ps8622_set(cl, 0x01, 0xc8, 0x35);
+	if (err)
+		goto error;
+
+	/* DPCD40A, Initial Code major revision '01' */
+	err = ps8622_set(cl, 0x01, 0xca, 0x01);
+	if (err)
+		goto error;
+
+	/* DPCD40B, Initial Code minor revision '05' */
+	err = ps8622_set(cl, 0x01, 0xcb, 0x05);
+	if (err)
+		goto error;
+
+
+	if (ps8622->bl) {
+		/* DPCD720, internal PWM */
+		err = ps8622_set(cl, 0x01, 0xa5, 0xa0);
+		if (err)
+			goto error;
+
+		/* FFh for 100% brightness, 0h for 0% brightness */
+		err = ps8622_set(cl, 0x01, 0xa7,
+				ps8622->bl->props.brightness);
+		if (err)
+			goto error;
+	} else {
+		/* DPCD720, external PWM */
+		err = ps8622_set(cl, 0x01, 0xa5, 0x80);
+		if (err)
+			goto error;
+	}
+
+	/* Set LVDS output as 6bit-VESA mapping, single LVDS channel */
+	err = ps8622_set(cl, 0x01, 0xcc, 0x13);
+	if (err)
+		goto error;
+
+	/* Enable SSC set by register */
+	err = ps8622_set(cl, 0x02, 0xb1, 0x20);
+	if (err)
+		goto error;
+
+	/* Set SSC enabled and +/-1% central spreading */
+	err = ps8622_set(cl, 0x04, 0x10, 0x16);
+	if (err)
+		goto error;
+
+	/* Logic end */
+	/* MPU Clock source: LC => RCO */
+	err = ps8622_set(cl, 0x04, 0x59, 0x60);
+	if (err)
+		goto error;
+
+	/* LC -> RCO */
+	err = ps8622_set(cl, 0x04, 0x54, 0x14);
+	if (err)
+		goto error;
+
+	/* HPD high */
+	err = ps8622_set(cl, 0x02, 0xa1, 0x91);
+
+error:
+	return err ? -EIO : 0;
+}
+
+static int ps8622_backlight_update(struct backlight_device *bl)
+{
+	struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev);
+	int ret, brightness = bl->props.brightness;
+
+	if (bl->props.power != FB_BLANK_UNBLANK ||
+	    bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+		brightness = 0;
+
+	if (!ps8622->enabled)
+		return -EINVAL;
+
+	ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness);
+
+	return ret;
+}
+
+static const struct backlight_ops ps8622_backlight_ops = {
+	.update_status	= ps8622_backlight_update,
+};
+
+static void ps8622_pre_enable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+	int ret;
+
+	if (ps8622->enabled)
+		return;
+
+	gpiod_set_value(ps8622->gpio_rst, 0);
+
+	if (ps8622->v12) {
+		ret = regulator_enable(ps8622->v12);
+		if (ret)
+			DRM_ERROR("fails to enable ps8622->v12");
+	}
+
+	if (drm_panel_prepare(ps8622->panel)) {
+		DRM_ERROR("failed to prepare panel\n");
+		return;
+	}
+
+	gpiod_set_value(ps8622->gpio_slp, 1);
+
+	/*
+	 * T1 is the range of time that it takes for the power to rise after we
+	 * enable the lcd/ps8622 fet. T2 is the range of time in which the
+	 * data sheet specifies we should deassert the reset pin.
+	 *
+	 * If it takes T1.max for the power to rise, we need to wait atleast
+	 * T2.min before deasserting the reset pin. If it takes T1.min for the
+	 * power to rise, we need to wait at most T2.max before deasserting the
+	 * reset pin.
+	 */
+	usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US,
+		     PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US);
+
+	gpiod_set_value(ps8622->gpio_rst, 1);
+
+	/* wait 20ms after RST high */
+	usleep_range(20000, 30000);
+
+	ret = ps8622_send_config(ps8622);
+	if (ret) {
+		DRM_ERROR("Failed to send config to bridge (%d)\n", ret);
+		return;
+	}
+
+	ps8622->enabled = true;
+}
+
+static void ps8622_enable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+	if (drm_panel_enable(ps8622->panel)) {
+		DRM_ERROR("failed to enable panel\n");
+		return;
+	}
+}
+
+static void ps8622_disable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+	if (drm_panel_disable(ps8622->panel)) {
+		DRM_ERROR("failed to disable panel\n");
+		return;
+	}
+	msleep(PS8622_PWMO_END_T12_MS);
+}
+
+static void ps8622_post_disable(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+
+	if (!ps8622->enabled)
+		return;
+
+	ps8622->enabled = false;
+
+	/*
+	 * This doesn't matter if the regulators are turned off, but something
+	 * else might keep them on. In that case, we want to assert the slp gpio
+	 * to lower power.
+	 */
+	gpiod_set_value(ps8622->gpio_slp, 0);
+
+	if (drm_panel_unprepare(ps8622->panel)) {
+		DRM_ERROR("failed to unprepare panel\n");
+		return;
+	}
+
+	if (ps8622->v12)
+		regulator_disable(ps8622->v12);
+
+	/*
+	 * Sleep for at least as long as it takes the power rail to fall, so
+	 * that asserting the rst gpio below has no effect.
+	 */
+	usleep_range(PS8622_POWER_FALL_T16_MAX_US,
+		     2 * PS8622_POWER_FALL_T16_MAX_US);
+	gpiod_set_value(ps8622->gpio_rst, 0);
+
+	msleep(PS8622_POWER_OFF_T17_MS);
+}
+
+static int ps8622_get_modes(struct drm_connector *connector)
+{
+	struct ps8622_bridge *ps8622;
+
+	ps8622 = connector_to_ps8622(connector);
+
+	return drm_panel_get_modes(ps8622->panel);
+}
+
+static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
+{
+	struct ps8622_bridge *ps8622;
+
+	ps8622 = connector_to_ps8622(connector);
+
+	return ps8622->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
+	.get_modes = ps8622_get_modes,
+	.best_encoder = ps8622_best_encoder,
+};
+
+static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
+								bool force)
+{
+	return connector_status_connected;
+}
+
+static void ps8622_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs ps8622_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = ps8622_detect,
+	.destroy = ps8622_connector_destroy,
+};
+
+static int ps8622_attach(struct drm_bridge *bridge)
+{
+	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
+	int ret;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD;
+	ret = drm_connector_init(bridge->dev, &ps8622->connector,
+			&ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector with drm\n");
+		return ret;
+	}
+	drm_connector_helper_add(&ps8622->connector,
+					&ps8622_connector_helper_funcs);
+	drm_connector_register(&ps8622->connector);
+	drm_mode_connector_attach_encoder(&ps8622->connector,
+							bridge->encoder);
+
+	if (ps8622->panel)
+		drm_panel_attach(ps8622->panel, &ps8622->connector);
+
+	drm_helper_hpd_irq_event(ps8622->connector.dev);
+
+	return ret;
+}
+
+static const struct drm_bridge_funcs ps8622_bridge_funcs = {
+	.pre_enable = ps8622_pre_enable,
+	.enable = ps8622_enable,
+	.disable = ps8622_disable,
+	.post_disable = ps8622_post_disable,
+	.attach = ps8622_attach,
+};
+
+static const struct of_device_id ps8622_devices[] = {
+	{.compatible = "parade,ps8622",},
+	{.compatible = "parade,ps8625",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, ps8622_devices);
+
+static int ps8622_probe(struct i2c_client *client,
+					const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct device_node *endpoint, *panel_node;
+	struct ps8622_bridge *ps8622;
+	int ret;
+
+	ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
+	if (!ps8622)
+		return -ENOMEM;
+
+	endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+	if (endpoint) {
+		panel_node = of_graph_get_remote_port_parent(endpoint);
+		if (panel_node) {
+			ps8622->panel = of_drm_find_panel(panel_node);
+			of_node_put(panel_node);
+			if (!ps8622->panel)
+				return -EPROBE_DEFER;
+		}
+	}
+
+	ps8622->client = client;
+
+	ps8622->v12 = devm_regulator_get(dev, "vdd12");
+	if (IS_ERR(ps8622->v12)) {
+		dev_info(dev, "no 1.2v regulator found for PS8622\n");
+		ps8622->v12 = NULL;
+	}
+
+	ps8622->gpio_slp = devm_gpiod_get(dev, "sleep");
+	if (IS_ERR(ps8622->gpio_slp)) {
+		ret = PTR_ERR(ps8622->gpio_slp);
+		dev_err(dev, "cannot get gpio_slp %d\n", ret);
+		return ret;
+	}
+	ret = gpiod_direction_output(ps8622->gpio_slp, 1);
+	if (ret) {
+		dev_err(dev, "cannot configure gpio_slp\n");
+		return ret;
+	}
+
+	ps8622->gpio_rst = devm_gpiod_get(dev, "reset");
+	if (IS_ERR(ps8622->gpio_rst)) {
+		ret = PTR_ERR(ps8622->gpio_rst);
+		dev_err(dev, "cannot get gpio_rst %d\n", ret);
+		return ret;
+	}
+	/*
+	 * Assert the reset pin high to avoid the bridge being
+	 * initialized prematurely
+	 */
+	ret = gpiod_direction_output(ps8622->gpio_rst, 1);
+	if (ret) {
+		dev_err(dev, "cannot configure gpio_rst\n");
+		return ret;
+	}
+
+	ps8622->max_lane_count = id->driver_data;
+
+	if (of_property_read_u32(dev->of_node, "lane-count",
+						&ps8622->lane_count)) {
+		ps8622->lane_count = ps8622->max_lane_count;
+	} else if (ps8622->lane_count > ps8622->max_lane_count) {
+		dev_info(dev,
+			 "lane-count property is too high, using max_lane_count\n");
+		ps8622->lane_count = ps8622->max_lane_count;
+	}
+
+	if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) {
+		ps8622->bl = backlight_device_register("ps8622-backlight",
+				dev, ps8622, &ps8622_backlight_ops,
+				NULL);
+		if (IS_ERR(ps8622->bl)) {
+			DRM_ERROR("failed to register backlight\n");
+			ret = PTR_ERR(ps8622->bl);
+			ps8622->bl = NULL;
+			return ret;
+		}
+		ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS;
+		ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
+	}
+
+	ps8622->bridge.funcs = &ps8622_bridge_funcs;
+	ps8622->bridge.of_node = dev->of_node;
+	ret = drm_bridge_add(&ps8622->bridge);
+	if (ret) {
+		DRM_ERROR("Failed to add bridge\n");
+		return ret;
+	}
+
+	i2c_set_clientdata(client, ps8622);
+
+	return 0;
+}
+
+static int ps8622_remove(struct i2c_client *client)
+{
+	struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
+
+	if (ps8622->bl)
+		backlight_device_unregister(ps8622->bl);
+
+	drm_bridge_remove(&ps8622->bridge);
+
+	return 0;
+}
+
+static const struct i2c_device_id ps8622_i2c_table[] = {
+	/* Device type, max_lane_count */
+	{"ps8622", 1},
+	{"ps8625", 2},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
+
+static struct i2c_driver ps8622_driver = {
+	.id_table	= ps8622_i2c_table,
+	.probe		= ps8622_probe,
+	.remove		= ps8622_remove,
+	.driver		= {
+		.name	= "ps8622",
+		.owner	= THIS_MODULE,
+		.of_match_table = ps8622_devices,
+	},
+};
+module_i2c_driver(ps8622_driver);
+
+MODULE_AUTHOR("Vincent Palatin <vpalatin@chromium.org>");
+MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver");
+MODULE_LICENSE("GPL v2");

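A worked sketch of the T1/T2 bound arithmetic used in ps8622_pre_enable() above, with hypothetical constants (the driver's real PS8622_* values are defined elsewhere in the file and are not shown in this hunk):

	/* Hypothetical datasheet numbers, for illustration only. */
	#define PS8622_POWER_RISE_T1_MIN_US	10
	#define PS8622_POWER_RISE_T1_MAX_US	10000
	#define PS8622_RST_HIGH_T2_MIN_US	3000
	#define PS8622_RST_HIGH_T2_MAX_US	30000

	/*
	 * Slowest power rise (T1.max) still needs T2.min of reset hold after
	 * it:  lower bound = 10000 + 3000 = 13000 us.  Fastest rise (T1.min)
	 * must not overshoot T2.max:  upper bound = 10 + 30000 = 30010 us.
	 */
	usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US,
		     PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US);
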
+ 1 - 1
drivers/gpu/drm/bridge/ptn3460.c

@@ -265,7 +265,7 @@ static struct drm_connector_funcs ptn3460_connector_funcs = {
 	.destroy = ptn3460_connector_destroy,
 };
 
-int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge)
 {
 	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
 	int ret;

+ 22 - 5
drivers/gpu/drm/drm_atomic.c

@@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 
 		connector->funcs->atomic_destroy_state(connector,
 						       state->connector_states[i]);
+		state->connectors[i] = NULL;
 		state->connector_states[i] = NULL;
 	}
 
@@ -145,6 +146,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 
 		crtc->funcs->atomic_destroy_state(crtc,
 						  state->crtc_states[i]);
+		state->crtcs[i] = NULL;
 		state->crtc_states[i] = NULL;
 	}
 
@@ -156,6 +158,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 
 		plane->funcs->atomic_destroy_state(plane,
 						   state->plane_states[i]);
+		state->planes[i] = NULL;
 		state->plane_states[i] = NULL;
 	}
 }
@@ -170,6 +173,9 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
  */
 void drm_atomic_state_free(struct drm_atomic_state *state)
 {
+	if (!state)
+		return;
+
 	drm_atomic_state_clear(state);
 
 	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
@@ -248,11 +254,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 	struct drm_mode_config *config = &dev->mode_config;
 
 	/* FIXME: Mode prop is missing, which also controls ->enable. */
-	if (property == config->prop_active) {
+	if (property == config->prop_active)
 		state->active = val;
-	} else if (crtc->funcs->atomic_set_property)
+	else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
-	return -EINVAL;
+	else
+		return -EINVAL;
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
 
@@ -266,9 +275,17 @@ int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
 		const struct drm_crtc_state *state,
 		struct drm_property *property, uint64_t *val)
 {
-	if (crtc->funcs->atomic_get_property)
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+
+	if (property == config->prop_active)
+		*val = state->active;
+	else if (crtc->funcs->atomic_get_property)
 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
-	return -EINVAL;
+	else
+		return -EINVAL;
+
+	return 0;
 }
 
 /**

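The NULL check added to drm_atomic_state_free() permits unconditional cleanup on error paths; a minimal sketch of such a path (the early-out condition is hypothetical):

	struct drm_atomic_state *state = NULL;
	int ret = 0;

	if (!commit_needed)		/* hypothetical early-out */
		goto unlock;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = drm_atomic_check_only(state);
unlock:
	/* Safe even on the early-out path, where state is still NULL. */
	drm_atomic_state_free(state);
	return ret;
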
+ 157 - 43
drivers/gpu/drm/drm_atomic_helper.c

@@ -151,7 +151,7 @@ steal_encoder(struct drm_atomic_state *state,
 static int
 update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 {
-	struct drm_connector_helper_funcs *funcs;
+	const struct drm_connector_helper_funcs *funcs;
 	struct drm_encoder *new_encoder;
 	struct drm_crtc *encoder_crtc;
 	struct drm_connector *connector;
@@ -264,7 +264,7 @@ mode_fixup(struct drm_atomic_state *state)
 	}
 
 	for (i = 0; i < state->num_connector; i++) {
-		struct drm_encoder_helper_funcs *funcs;
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 
 		conn_state = state->connector_states[i];
@@ -317,7 +317,7 @@ mode_fixup(struct drm_atomic_state *state)
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 
 		crtc_state = state->crtc_states[i];
@@ -346,7 +346,7 @@ needs_modeset(struct drm_crtc_state *state)
 }
 
 /**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
  * @dev: DRM device
  * @state: the driver state object
  *
@@ -461,7 +461,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
 
 /**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_planes - validate state object for planes changes
  * @dev: DRM device
  * @state: the driver state object
  *
@@ -481,7 +481,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 	int i, ret = 0;
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
 
@@ -504,7 +504,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc = state->crtcs[i];
 
 		if (!crtc)
@@ -571,9 +571,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 	int i;
 
 	for (i = 0; i < old_state->num_connector; i++) {
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_connector_state *old_conn_state;
 		struct drm_connector *connector;
-		struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 		struct drm_crtc_state *old_crtc_state;
 
@@ -587,7 +587,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 		old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
 
-		if (!old_crtc_state->active)
+		if (!old_crtc_state->active ||
+		    !needs_modeset(old_conn_state->crtc->state))
 			continue;
 
 		encoder = old_conn_state->best_encoder;
@@ -605,7 +606,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call call disable hooks twice.
+		 * it away), so we won't call disable hooks twice.
 		 */
 		if (encoder->bridge)
 			encoder->bridge->funcs->disable(encoder->bridge);
@@ -623,7 +624,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 		struct drm_crtc_state *old_crtc_state;
 
@@ -713,7 +714,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 	int i;
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 
 		crtc = old_state->crtcs[i];
@@ -732,9 +733,9 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 	}
 
 	for (i = 0; i < old_state->num_connector; i++) {
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_connector *connector;
 		struct drm_crtc_state *new_crtc_state;
-		struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 		struct drm_display_mode *mode, *adjusted_mode;
 
@@ -757,7 +758,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call call mode_set hooks twice.
+		 * it away), so we won't call mode_set hooks twice.
 		 */
 		if (funcs->mode_set)
 			funcs->mode_set(encoder, mode, adjusted_mode);
@@ -812,7 +813,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 	int i;
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc;
 
 		crtc = old_state->crtcs[i];
@@ -838,8 +839,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 	}
 
 	for (i = 0; i < old_state->num_connector; i++) {
+		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_connector *connector;
-		struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
 
 		connector = old_state->connectors[i];
@@ -847,7 +848,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		if (!connector || !connector->state->best_encoder)
 			continue;
 
-		if (!connector->state->crtc->state->active)
+		if (!connector->state->crtc->state->active ||
+		    !needs_modeset(connector->state->crtc->state))
 			continue;
 
 		encoder = connector->state->best_encoder;
@@ -858,7 +860,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 
 		/*
 		 * Each encoder has at most one connector (since we always steal
-		 * it away), so we won't call call enable hooks twice.
+		 * it away), so we won't call enable hooks twice.
 		 */
 		if (encoder->bridge)
 			encoder->bridge->funcs->pre_enable(encoder->bridge);
@@ -1025,7 +1027,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
 	/*
 	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one conditions: It must be guaranteed
+	 * any modeset locks at all under one condition: It must be guaranteed
 	 * that the asynchronous work has either been cancelled (if the driver
 	 * supports it, which at least requires that the framebuffers get
 	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
@@ -1114,7 +1116,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 	int ret, i;
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
 		struct drm_framebuffer *fb;
@@ -1137,7 +1139,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
 fail:
 	for (i--; i >= 0; i--) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
 		struct drm_framebuffer *fb;
@@ -1179,7 +1181,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 	int i;
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc = old_state->crtcs[i];
 
 		if (!crtc)
@@ -1194,7 +1196,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 	}
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = old_state->planes[i];
 		struct drm_plane_state *old_plane_state;
 
@@ -1219,7 +1221,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 	}
 
 	for (i = 0; i < ncrtcs; i++) {
-		struct drm_crtc_helper_funcs *funcs;
+		const struct drm_crtc_helper_funcs *funcs;
 		struct drm_crtc *crtc = old_state->crtcs[i];
 
 		if (!crtc)
@@ -1254,7 +1256,7 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 	int i;
 
 	for (i = 0; i < nplanes; i++) {
-		struct drm_plane_helper_funcs *funcs;
+		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = old_state->planes[i];
 		struct drm_plane_state *plane_state = old_state->plane_states[i];
 		struct drm_framebuffer *old_fb;
@@ -2001,10 +2003,10 @@ retry:
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
 	list_for_each_entry(tmp_connector, &config->connector_list, head) {
-		if (connector->state->crtc != crtc)
+		if (tmp_connector->state->crtc != crtc)
 			continue;
 
-		if (connector->dpms == DRM_MODE_DPMS_ON) {
+		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
 			active = true;
 			break;
 		}
@@ -2066,6 +2068,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
 
+/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+					      struct drm_crtc_state *state)
+{
+	memcpy(state, crtc->state, sizeof(*state));
+
+	state->mode_changed = false;
+	state->active_changed = false;
+	state->planes_changed = false;
+	state->event = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
 /**
  * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
  * @crtc: drm CRTC
@@ -2081,19 +2103,34 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
 	if (WARN_ON(!crtc->state))
 		return NULL;
 
-	state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
-
-	if (state) {
-		state->mode_changed = false;
-		state->active_changed = false;
-		state->planes_changed = false;
-		state->event = NULL;
-	}
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_crtc_duplicate_state(crtc, state);
 
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 
+/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @crtc: CRTC object
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+					    struct drm_crtc_state *state)
+{
+	/*
+	 * This is currently a placeholder so that drivers that subclass the
+	 * state will automatically do the right thing if code is ever added
+	 * to this function.
+	 */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
 /**
  * drm_atomic_helper_crtc_destroy_state - default state destroy hook
  * @crtc: drm CRTC
@@ -2105,6 +2142,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 					  struct drm_crtc_state *state)
 {
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
@@ -2129,6 +2167,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
+/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+					       struct drm_plane_state *state)
+{
+	memcpy(state, plane->state, sizeof(*state));
+
+	if (state->fb)
+		drm_framebuffer_reference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
 /**
  * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
  * @plane: drm plane
@@ -2144,15 +2200,31 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
 	if (WARN_ON(!plane->state))
 		return NULL;
 
-	state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
-
-	if (state && state->fb)
-		drm_framebuffer_reference(state->fb);
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_plane_duplicate_state(plane, state);
 
 	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 
+/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @plane: plane object
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+					     struct drm_plane_state *state)
+{
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
 /**
  * drm_atomic_helper_plane_destroy_state - default state destroy hook
  * @plane: drm plane
@@ -2164,9 +2236,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
 					   struct drm_plane_state *state)
 {
-	if (state->fb)
-		drm_framebuffer_unreference(state->fb);
-
+	__drm_atomic_helper_plane_destroy_state(plane, state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2189,6 +2259,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 
+/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+					    struct drm_connector_state *state)
+{
+	memcpy(state, connector->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
 /**
  * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
  * @connector: drm connector
@@ -2199,13 +2285,40 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 struct drm_connector_state *
 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
 {
+	struct drm_connector_state *state;
+
 	if (WARN_ON(!connector->state))
 		return NULL;
 
-	return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (state)
+		__drm_atomic_helper_connector_duplicate_state(connector, state);
+
+	return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
+/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @connector: connector object
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+					    struct drm_connector_state *state)
+{
+	/*
+	 * This is currently a placeholder so that drivers that subclass the
+	 * state will automatically do the right thing if code is ever added
+	 * to this function.
+	 */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
 /**
  * drm_atomic_helper_connector_destroy_state - default state destroy hook
  * @connector: drm connector
@@ -2217,6 +2330,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					  struct drm_connector_state *state)
 {
+	__drm_atomic_helper_connector_destroy_state(connector, state);
 	kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);

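As the kernel-doc above says, the new __drm_atomic_helper_*() variants exist for drivers that subclass the atomic state. A minimal sketch of such a driver for the CRTC case (struct foo_crtc_state and its extra member are illustrative):

	struct foo_crtc_state {
		struct drm_crtc_state base;
		u32 dither_mode;		/* driver-private member */
	};

	#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

	static struct drm_crtc_state *
	foo_crtc_duplicate_state(struct drm_crtc *crtc)
	{
		struct foo_crtc_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		/* Copies the core state and resets the inferred values. */
		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
		state->dither_mode = to_foo_crtc_state(crtc->state)->dither_mode;

		return &state->base;
	}

	static void foo_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
	{
		__drm_atomic_helper_crtc_destroy_state(crtc, state);
		kfree(to_foo_crtc_state(state));
	}
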
+ 1 - 1
drivers/gpu/drm/drm_bridge.c

@@ -49,7 +49,7 @@ void drm_bridge_remove(struct drm_bridge *bridge)
 }
 EXPORT_SYMBOL(drm_bridge_remove);
 
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
 {
 	if (!dev || !bridge)
 		return -EINVAL;

+ 5 - 0
drivers/gpu/drm/drm_crtc.c

@@ -660,6 +660,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 	struct drm_mode_config *config = &dev->mode_config;
 	int ret;
 
+	WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
+	WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
+
 	crtc->dev = dev;
 	crtc->funcs = funcs;
 	crtc->invert_dimensions = false;
@@ -5619,6 +5622,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 	mutex_unlock(&dev->mode_config.idr_mutex);
 	return NULL;
 }
+EXPORT_SYMBOL(drm_mode_get_tile_group);
 
 /**
  * drm_mode_create_tile_group - create a tile group from a displayid description
@@ -5657,3 +5661,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 	mutex_unlock(&dev->mode_config.idr_mutex);
 	return tg;
 }
+EXPORT_SYMBOL(drm_mode_create_tile_group);

+ 5 - 4
drivers/gpu/drm/drm_crtc_helper.c

@@ -270,7 +270,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
@@ -292,6 +292,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	}
 
 	saved_mode = crtc->mode;
+	saved_hwmode = crtc->hwmode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
 
@@ -334,6 +335,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	}
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
+	crtc->hwmode = *adjusted_mode;
+
 	/* Prepare the encoders and CRTCs before setting the mode. */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
@@ -396,9 +399,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			encoder->bridge->funcs->enable(encoder->bridge);
 	}
 
-	/* Store real post-adjustment hardware mode. */
-	crtc->hwmode = *adjusted_mode;
-
 	/* Calculate and store various constants which
 	 * are later needed by vblank and swap-completion
 	 * timestamping. They are derived from true hwmode.
@@ -411,6 +411,7 @@ done:
 	if (!ret) {
 		crtc->enabled = saved_enabled;
 		crtc->mode = saved_mode;
+		crtc->hwmode = saved_hwmode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
 	}

+ 60 - 20
drivers/gpu/drm/drm_dp_helper.c

@@ -427,11 +427,13 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
  * retrying the transaction as appropriate.  It is assumed that the
  * aux->transfer function does not modify anything in the msg other than the
  * reply field.
+ *
+ * Returns bytes transferred on success, or a negative error code on failure.
  */
 static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 {
 	unsigned int retry;
-	int err;
+	int ret;
 
 	/*
 	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
@@ -440,14 +442,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	 */
 	for (retry = 0; retry < 7; retry++) {
 		mutex_lock(&aux->hw_mutex);
-		err = aux->transfer(aux, msg);
+		ret = aux->transfer(aux, msg);
 		mutex_unlock(&aux->hw_mutex);
-		if (err < 0) {
-			if (err == -EBUSY)
+		if (ret < 0) {
+			if (ret == -EBUSY)
 				continue;
 
-			DRM_DEBUG_KMS("transaction failed: %d\n", err);
-			return err;
+			DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+			return ret;
 		}
 
 
@@ -460,7 +462,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			break;
 
 		case DP_AUX_NATIVE_REPLY_NACK:
-			DRM_DEBUG_KMS("native nack\n");
+			DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
 			return -EREMOTEIO;
 
 		case DP_AUX_NATIVE_REPLY_DEFER:
@@ -488,12 +490,10 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			 * Both native ACK and I2C ACK replies received. We
 			 * can assume the transfer was successful.
 			 */
-			if (err < msg->size)
-				return -EPROTO;
-			return 0;
+			return ret;
 
 		case DP_AUX_I2C_REPLY_NACK:
-			DRM_DEBUG_KMS("I2C nack\n");
+			DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n", ret, msg->size);
 			aux->i2c_nack_count++;
 			return -EREMOTEIO;
 
@@ -513,14 +513,55 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return -EREMOTEIO;
 }
 
+/*
+ * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
+ *
+ * Returns an error code on failure, or a recommended transfer size on success.
+ */
+static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
+{
+	int err, ret = orig_msg->size;
+	struct drm_dp_aux_msg msg = *orig_msg;
+
+	while (msg.size > 0) {
+		err = drm_dp_i2c_do_msg(aux, &msg);
+		if (err <= 0)
+			return err == 0 ? -EPROTO : err;
+
+		if (err < msg.size && err < ret) {
+			DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n",
+				      msg.size, err);
+			ret = err;
+		}
+
+		msg.size -= err;
+		msg.buffer += err;
+	}
+
+	return ret;
+}
+
+/*
+ * Bizlink-designed DP->DVI-D Dual Link adapters require the I2C over AUX
+ * packets to be as large as possible; otherwise the I2C transactions never
+ * succeed. Hence the default is the maximum.
+ */
+static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES;
+module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_transfer_size,
+		 "Number of bytes to transfer in a single I2C over DP AUX CH message (1-16, default 16)");
+
 static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 			   int num)
 {
 	struct drm_dp_aux *aux = adapter->algo_data;
 	unsigned int i, j;
+	unsigned transfer_size;
 	struct drm_dp_aux_msg msg;
 	int err = 0;
 
+	dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
+
 	memset(&msg, 0, sizeof(msg));
 
 	for (i = 0; i < num; i++) {
@@ -538,20 +579,19 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 		err = drm_dp_i2c_do_msg(aux, &msg);
 		if (err < 0)
 			break;
-		/*
-		 * Many hardware implementations support FIFOs larger than a
-		 * single byte, but it has been empirically determined that
-		 * transferring data in larger chunks can actually lead to
-		 * decreased performance. Therefore each message is simply
-		 * transferred byte-by-byte.
+		/* We want each transaction to be as large as possible, but
+		 * we'll go to smaller sizes if the hardware gives us a
+		 * short reply.
 		 */
-		for (j = 0; j < msgs[i].len; j++) {
+		transfer_size = dp_aux_i2c_transfer_size;
+		for (j = 0; j < msgs[i].len; j += msg.size) {
 			msg.buffer = msgs[i].buf + j;
-			msg.size = 1;
+			msg.size = min(transfer_size, msgs[i].len - j);
 
-			err = drm_dp_i2c_do_msg(aux, &msg);
+			err = drm_dp_i2c_drain_msg(aux, &msg);
 			if (err < 0)
 				break;
+			transfer_size = err;
 		}
 		if (err < 0)
 			break;

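A worked example of the adaptive sizing in drm_dp_i2c_xfer() and drm_dp_i2c_drain_msg() above (the byte counts are illustrative):

	/*
	 * Reading 70 bytes with the default dp_aux_i2c_transfer_size of 16:
	 *
	 *   j = 0:  msg.size = min(16, 70) = 16.  The sink short-replies
	 *           4 bytes at a time, so drm_dp_i2c_drain_msg() completes
	 *           the 16-byte chunk in 4-byte steps and returns 4.
	 *   j = 16: transfer_size is now 4, msg.size = min(4, 54) = 4
	 *   ...
	 *   j = 68: msg.size = min(4, 2) = 2
	 *
	 * All 70 bytes are still transferred; only the per-transaction size
	 * adapts downward to what the hardware actually acks.
	 */
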
+ 13 - 0
drivers/gpu/drm/drm_dp_mst_topology.c

@@ -2324,6 +2324,19 @@ out:
 }
 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
 
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+	int slots = 0;
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return slots;
+
+	slots = port->vcpi.num_slots;
+	drm_dp_put_port(port);
+	return slots;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
+
 /**
  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
  * @mgr: manager for this port

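A minimal usage sketch for the new drm_dp_mst_get_vcpi_slots() export (the driver-side hook is hypothetical); the helper validates the port reference itself, so the caller needs no extra locking for the read:

	int slots = drm_dp_mst_get_vcpi_slots(mgr, port);

	if (slots > 0)
		foo_set_m_n_from_slots(crtc, slots);	/* hypothetical */
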
+ 1 - 1
drivers/gpu/drm/drm_drv.c

@@ -70,7 +70,7 @@ void drm_err(const char *format, ...)
 	vaf.fmt = format;
 	vaf.va = &args;
 
-	printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
+	printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
 	       __builtin_return_address(0), &vaf);
 
 	va_end(args);

+ 1 - 0
drivers/gpu/drm/drm_edid_load.c

@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
+	drm_edid_to_eld(connector, edid);
 	kfree(edid);
 
 	return ret;

+ 1 - 1
drivers/gpu/drm/drm_fb_cma_helper.c

@@ -304,7 +304,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
 	}
 
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
-	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
 	offset = fbi->var.xoffset * bytes_per_pixel;
 	offset += fbi->var.yoffset * fb->pitches[0];

+ 38 - 16
drivers/gpu/drm/drm_fb_helper.c

@@ -1034,23 +1034,45 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	crtc_count = 0;
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_display_mode *desired_mode;
-		int x, y;
+		struct drm_mode_set *mode_set;
+		int x, y, j;
+		/* In case of a tile group, are we the last tile vertically or
+		 * horizontally?  With no tile group you are always the last
+		 * one, both vertically and horizontally.
+		 */
+		bool lastv = true, lasth = true;
+
 		desired_mode = fb_helper->crtc_info[i].desired_mode;
+		mode_set = &fb_helper->crtc_info[i].mode_set;
+
+		if (!desired_mode)
+			continue;
+
+		crtc_count++;
+
 		x = fb_helper->crtc_info[i].x;
 		y = fb_helper->crtc_info[i].y;
-		if (desired_mode) {
-			if (gamma_size == 0)
-				gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
-			if (desired_mode->hdisplay + x < sizes.fb_width)
-				sizes.fb_width = desired_mode->hdisplay + x;
-			if (desired_mode->vdisplay + y < sizes.fb_height)
-				sizes.fb_height = desired_mode->vdisplay + y;
-			if (desired_mode->hdisplay + x > sizes.surface_width)
-				sizes.surface_width = desired_mode->hdisplay + x;
-			if (desired_mode->vdisplay + y > sizes.surface_height)
-				sizes.surface_height = desired_mode->vdisplay + y;
-			crtc_count++;
+
+		if (gamma_size == 0)
+			gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+
+		sizes.surface_width  = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
+		sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+
+		for (j = 0; j < mode_set->num_connectors; j++) {
+			struct drm_connector *connector = mode_set->connectors[j];
+			if (connector->has_tile) {
+				lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
+				lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
+				/* cloning to multiple tiles is just crazy-talk, so: */
+				break;
+			}
 		}
+
+		if (lasth)
+			sizes.fb_width  = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+		if (lastv)
+			sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
 	}
 
 	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
@@ -1261,12 +1283,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
 						      int width, int height)
 {
 	struct drm_cmdline_mode *cmdline_mode;
-	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *mode;
 	bool prefer_non_interlace;
 
 	cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
 	if (cmdline_mode->specified == false)
-		return mode;
+		return NULL;
 
 	/* attempt to find a matching mode in the list of modes
 	 *  we have gotten so far, if not add a CVT mode that conforms
@@ -1275,7 +1297,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
 		goto create_mode;
 
 	prefer_non_interlace = !cmdline_mode->interlace;
- again:
+again:
 	list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
 		/* check width/height */
 		if (mode->hdisplay != cmdline_mode->xres ||

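A worked example of the tile-aware sizing above, for a hypothetical 2x1 tile group of 1920x2160 panels with the right tile at x = 1920:

	/*
	 * Both CRTCs grow the surface:
	 *   surface_width  = max(1920 + 0, 1920 + 1920) = 3840
	 *   surface_height = max(2160, 2160)            = 2160
	 *
	 * Only the right tile has tile_h_loc == num_h_tile - 1, so only it
	 * clamps fb_width (to 3840); both tiles are the last one vertically
	 * and clamp fb_height to 2160.  The fbdev framebuffer therefore
	 * covers the whole 3840x2160 group instead of a single tile.
	 */
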
+ 1 - 0
drivers/gpu/drm/drm_info.c

@@ -37,6 +37,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+#include "drm_internal.h"
 #include "drm_legacy.h"
 
 /**

+ 1 - 1
drivers/gpu/drm/drm_ioc32.c

@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 	return 0;
 }
 
-drm_ioctl_compat_t *drm_compat_ioctls[] = {
+static drm_ioctl_compat_t *drm_compat_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,

+ 28 - 32
drivers/gpu/drm/drm_ioctl.c

@@ -524,8 +524,13 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 	return 0;
 }
 
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+#define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
+	[DRM_IOCTL_NR(ioctl)] = {		\
+		.cmd = ioctl,			\
+		.func = _func,			\
+		.flags = _flags,		\
+		.name = #ioctl			\
+	}
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -663,39 +668,29 @@ long drm_ioctl(struct file *filp,
 	int retcode = -EINVAL;
 	char stack_kdata[128];
 	char *kdata = NULL;
-	unsigned int usize, asize;
+	unsigned int usize, asize, drv_size;
 
 	dev = file_priv->minor->dev;
 
 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;
 
-	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
-	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
-		goto err_i1;
-	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
-	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
-		u32 drv_size;
+	if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) {
+		/* driver ioctl */
+		if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+			goto err_i1;
 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
-		drv_size = _IOC_SIZE(ioctl->cmd_drv);
-		usize = asize = _IOC_SIZE(cmd);
-		if (drv_size > asize)
-			asize = drv_size;
-		cmd = ioctl->cmd_drv;
-	}
-	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
-		u32 drv_size;
-
+	} else {
+		/* core ioctl */
+		if (nr >= DRM_CORE_IOCTL_COUNT)
+			goto err_i1;
 		ioctl = &drm_ioctls[nr];
+	}
 
-		drv_size = _IOC_SIZE(ioctl->cmd);
-		usize = asize = _IOC_SIZE(cmd);
-		if (drv_size > asize)
-			asize = drv_size;
-
-		cmd = ioctl->cmd;
-	} else
-		goto err_i1;
+	drv_size = _IOC_SIZE(ioctl->cmd);
+	usize = _IOC_SIZE(cmd);
+	asize = max(usize, drv_size);
+	cmd = ioctl->cmd;
 
 	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
 		  task_pid_nr(current),
@@ -776,12 +771,13 @@ EXPORT_SYMBOL(drm_ioctl);
  */
 bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 {
-	if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
-	    (nr < DRM_COMMAND_BASE)) {
-		*flags = drm_ioctls[nr].flags;
-		return true;
-	}
+	if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END)
+		return false;
+
+	if (nr >= DRM_CORE_IOCTL_COUNT)
+		return false;
 
-	return false;
+	*flags = drm_ioctls[nr].flags;
+	return true;
 }
 EXPORT_SYMBOL(drm_ioctl_flags);

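An illustration of the simplified argument-size handling above (the sizes are hypothetical):

	/*
	 * If userspace was built against an older UAPI header and passes a
	 * 24-byte argument while the kernel's table entry declares 32 bytes:
	 *
	 *   usize    = _IOC_SIZE(cmd)        = 24
	 *   drv_size = _IOC_SIZE(ioctl->cmd) = 32
	 *   asize    = max(usize, drv_size)  = 32
	 *
	 * drm_ioctl() copies in only usize bytes and zeroes the remaining
	 * asize - usize, so new trailing fields read as zero.
	 */
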
+ 7 - 1
drivers/gpu/drm/drm_modes.c

@@ -903,6 +903,12 @@ EXPORT_SYMBOL(drm_mode_duplicate);
  */
 bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
 {
+	if (!mode1 && !mode2)
+		return true;
+
+	if (!mode1 || !mode2)
+		return false;
+
 	/* do clock check convert to PICOS so fb modes get matched
 	 * the same */
 	if (mode1->clock && mode2->clock) {
@@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_sort);
 /**
  * drm_mode_connector_list_update - update the mode list for the connector
  * @connector: the connector to update
- * @merge_type_bits: whether to merge or overright type bits.
+ * @merge_type_bits: whether to merge or overwrite type bits
  *
  * This moves the modes from the @connector probed_modes list
  * to the actual mode list. It compares the probed mode against the current

+ 1 - 0
drivers/gpu/drm/drm_pci.c

@@ -27,6 +27,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <drm/drmP.h>
+#include "drm_internal.h"
 #include "drm_legacy.h"
 
 /**

+ 6 - 5
drivers/gpu/drm/drm_plane_helper.c

@@ -353,13 +353,14 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
 	if (primary == NULL) {
 		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
 		return NULL;
-		/*
-		 * Remove the format_default field from drm_plane when dropping
-		 * this helper.
-		 */
-		primary->format_default = true;
 	}
 
+	/*
+	 * Remove the format_default field from drm_plane when dropping
+	 * this helper.
+	 */
+	primary->format_default = true;
+
 	/* possible_crtc's will be filled in later by crtc_init */
 	ret = drm_universal_plane_init(dev, primary, 0,
 				       &drm_primary_helper_funcs,

+ 1 - 0
drivers/gpu/drm/drm_probe_helper.c

@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 			struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
 
 			count = drm_add_edid_modes(connector, edid);
+			drm_edid_to_eld(connector, edid);
 		} else
 			count = (*connector_funcs->get_modes)(connector);
 	}

+ 1 - 0
drivers/gpu/drm/drm_vm.c

@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #endif
 #include <asm/pgtable.h>
+#include "drm_internal.h"
 #include "drm_legacy.h"
 
 struct drm_vma_entry {

+ 3 - 2
drivers/gpu/drm/exynos/exynos_drm_fbdev.c

@@ -76,6 +76,7 @@ static struct fb_ops exynos_drm_fb_ops = {
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+				     struct drm_fb_helper_surface_size *sizes,
 				     struct drm_framebuffer *fb)
 {
 	struct fb_info *fbi = helper->fbdev;
@@ -85,7 +86,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	unsigned long offset;
 
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
-	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
 	/* RGB formats use only one buffer */
 	buffer = exynos_drm_fb_buffer(fb, 0);
@@ -189,7 +190,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 		goto err_destroy_framebuffer;
 	}
 
-	ret = exynos_drm_fbdev_update(helper, helper->fb);
+	ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
 	if (ret < 0)
 		goto err_dealloc_cmap;
 

+ 5 - 3
drivers/gpu/drm/exynos/exynos_drm_fimd.c

@@ -147,6 +147,7 @@ struct fimd_win_data {
 	unsigned int		ovl_height;
 	unsigned int		fb_width;
 	unsigned int		fb_height;
+	unsigned int		fb_pitch;
 	unsigned int		bpp;
 	unsigned int		pixel_format;
 	dma_addr_t		dma_addr;
@@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
 	win_data->offset_y = plane->crtc_y;
 	win_data->ovl_width = plane->crtc_width;
 	win_data->ovl_height = plane->crtc_height;
+	win_data->fb_pitch = plane->pitch;
 	win_data->fb_width = plane->fb_width;
 	win_data->fb_height = plane->fb_height;
 	win_data->dma_addr = plane->dma_addr[0] + offset;
 	win_data->bpp = plane->bpp;
 	win_data->pixel_format = plane->pixel_format;
-	win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
-				(plane->bpp >> 3);
+	win_data->buf_offsize =
+		plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
 	win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
 
 	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
 	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
 	/* buffer end address */
-	size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+	size = win_data->fb_pitch * win_data->ovl_height;
 	val = (unsigned long)(win_data->dma_addr + size);
 	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 

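A worked example of the pitch-based math above (hypothetical plane: 1440x900 XRGB8888 with rows padded to a 6144-byte pitch):

	/*
	 *   line_size   = 1440 * (32 >> 3) = 5760 bytes of pixels per row
	 *   buf_offsize = 6144 - 5760      = 384 bytes of padding per row
	 *   buffer end  = dma_addr + 6144 * 900
	 *
	 * The pitch is already in bytes, which is why the buffer-end
	 * calculation must not multiply it by (bpp >> 3) again.
	 */
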
+ 10 - 7
drivers/gpu/drm/exynos/exynos_mixer.c

@@ -55,6 +55,7 @@ struct hdmi_win_data {
 	unsigned int		fb_x;
 	unsigned int		fb_y;
 	unsigned int		fb_width;
+	unsigned int		fb_pitch;
 	unsigned int		fb_height;
 	unsigned int		src_width;
 	unsigned int		src_height;
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 	} else {
 		luma_addr[0] = win_data->dma_addr;
 		chroma_addr[0] = win_data->dma_addr
-			+ (win_data->fb_width * win_data->fb_height);
+			+ (win_data->fb_pitch * win_data->fb_height);
 	}
 
 	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 			luma_addr[1] = luma_addr[0] + 0x40;
 			chroma_addr[1] = chroma_addr[0] + 0x40;
 		} else {
-			luma_addr[1] = luma_addr[0] + win_data->fb_width;
-			chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+			luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
+			chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
 		}
 	} else {
 		ctx->interlace = false;
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
 	/* setting size of input image */
-	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
 		VP_IMG_VSIZE(win_data->fb_height));
 	/* chroma height has to be reduced by 2 to avoid chroma distortions */
-	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
 		VP_IMG_VSIZE(win_data->fb_height / 2));
 
 	vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 	/* converting dma address base and source offset */
 	dma_addr = win_data->dma_addr
 		+ (win_data->fb_x * win_data->bpp >> 3)
-		+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+		+ (win_data->fb_y * win_data->fb_pitch);
 	src_x_offset = 0;
 	src_y_offset = 0;
 
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 		MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
 
 	/* setup geometry */
-	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+			win_data->fb_pitch / (win_data->bpp >> 3));
 
 	/* setup display size */
 	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
 	win_data->fb_y = plane->fb_y;
 	win_data->fb_width = plane->fb_width;
 	win_data->fb_height = plane->fb_height;
+	win_data->fb_pitch = plane->pitch;
 	win_data->src_width = plane->src_width;
 	win_data->src_height = plane->src_height;
 

+ 102 - 95
drivers/gpu/drm/i2c/adv7511.c

@@ -27,12 +27,13 @@ struct adv7511 {
 	struct regmap *regmap;
 	struct regmap *packet_memory_regmap;
 	enum drm_connector_status status;
-	int dpms_mode;
+	bool powered;
 
 	unsigned int f_tmds;
 
 	unsigned int current_edid_segment;
 	uint8_t edid_buf[256];
+	bool edid_read;
 
 	wait_queue_head_t wq;
 	struct drm_encoder *encoder;
@@ -357,6 +358,48 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
 	adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
 }
 
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+	adv7511->current_edid_segment = -1;
+
+	regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+		     ADV7511_INT0_EDID_READY);
+	regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+		     ADV7511_INT1_DDC_ERROR);
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+			   ADV7511_POWER_POWER_DOWN, 0);
+
+	/*
+	 * Per spec it is allowed to pulse the HDP signal to indicate that the
+	 * EDID information has changed. Some monitors do this when they wakeup
+	 * from standby or are enabled. When the HDP goes low the adv7511 is
+	 * reset and the outputs are disabled which might cause the monitor to
+	 * go to standby again. To avoid this we ignore the HDP pin for the
+	 * first few seconds after enabling the output.
+	 */
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+			   ADV7511_REG_POWER2_HDP_SRC_MASK,
+			   ADV7511_REG_POWER2_HDP_SRC_NONE);
+
+	/*
+	 * Most of the registers are reset during power down or when HPD is low.
+	 */
+	regcache_sync(adv7511->regmap);
+
+	adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+	/* TODO: setup additional power down modes */
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+			   ADV7511_POWER_POWER_DOWN,
+			   ADV7511_POWER_POWER_DOWN);
+	regcache_mark_dirty(adv7511->regmap);
+
+	adv7511->powered = false;
+}
+
 /* -----------------------------------------------------------------------------
  * Interrupt and hotplug detection
  */
@@ -379,69 +422,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
 	return false;
 }
 
-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-{
-	struct adv7511 *adv7511 = devid;
-
-	if (adv7511_hpd(adv7511))
-		drm_helper_hpd_irq_event(adv7511->encoder->dev);
-
-	wake_up_all(&adv7511->wq);
-
-	return IRQ_HANDLED;
-}
-
-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
-						 unsigned int irq)
+static int adv7511_irq_process(struct adv7511 *adv7511)
 {
 	unsigned int irq0, irq1;
-	unsigned int pending;
 	int ret;
 
 	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
 	if (ret < 0)
-		return 0;
+		return ret;
+
 	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
 	if (ret < 0)
-		return 0;
+		return ret;
 
-	pending = (irq1 << 8) | irq0;
+	regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-	return pending & irq;
+	if (irq0 & ADV7511_INT0_HDP)
+		drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+		adv7511->edid_read = true;
+
+		if (adv7511->i2c_main->irq)
+			wake_up_all(&adv7511->wq);
+	}
+
+	return 0;
 }
 
-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
-				      int timeout)
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+	struct adv7511 *adv7511 = devid;
+	int ret;
+
+	ret = adv7511_irq_process(adv7511);
+	return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
 {
-	unsigned int pending;
 	int ret;
 
 	if (adv7511->i2c_main->irq) {
 		ret = wait_event_interruptible_timeout(adv7511->wq,
-				adv7511_is_interrupt_pending(adv7511, irq),
-				msecs_to_jiffies(timeout));
-		if (ret <= 0)
-			return 0;
-		pending = adv7511_is_interrupt_pending(adv7511, irq);
+				adv7511->edid_read, msecs_to_jiffies(timeout));
 	} else {
-		if (timeout < 25)
-			timeout = 25;
-		do {
-			pending = adv7511_is_interrupt_pending(adv7511, irq);
-			if (pending)
+		for (; timeout > 0; timeout -= 25) {
+			ret = adv7511_irq_process(adv7511);
+			if (ret < 0)
 				break;
+
+			if (adv7511->edid_read)
+				break;
+
 			msleep(25);
-			timeout -= 25;
-		} while (timeout >= 25);
+		}
 	}
 
-	return pending;
+	return adv7511->edid_read ? 0 : -EIO;
 }
 
-/* -----------------------------------------------------------------------------
- * EDID retrieval
- */
-
 static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
 				  size_t len)
 {
@@ -463,19 +508,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
 			return ret;
 
 		if (status != 2) {
+			adv7511->edid_read = false;
 			regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
 				     block);
-			ret = adv7511_wait_for_interrupt(adv7511,
-					ADV7511_INT0_EDID_READY |
-					ADV7511_INT1_DDC_ERROR, 200);
-
-			if (!(ret & ADV7511_INT0_EDID_READY))
-				return -EIO;
+			ret = adv7511_wait_for_edid(adv7511, 200);
+			if (ret < 0)
+				return ret;
 		}
 
-		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-			     ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-
 		/* Break this apart, hopefully more I2C controllers will
 		 * support 64 byte transfers than 256 byte transfers
 		 */
@@ -526,9 +566,11 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
 	unsigned int count;
 
 	/* Reading the EDID only works if the device is powered */
-	if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+	if (!adv7511->powered) {
 		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-			     ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+			     ADV7511_INT0_EDID_READY);
+		regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+			     ADV7511_INT1_DDC_ERROR);
 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
 				   ADV7511_POWER_POWER_DOWN, 0);
 		adv7511->current_edid_segment = -1;
@@ -536,7 +578,7 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
 
 	edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
 
-	if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
+	if (!adv7511->powered)
 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
 				   ADV7511_POWER_POWER_DOWN,
 				   ADV7511_POWER_POWER_DOWN);
@@ -558,41 +600,10 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		adv7511->current_edid_segment = -1;
-
-		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-			     ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-				   ADV7511_POWER_POWER_DOWN, 0);
-		/*
-		 * Per spec it is allowed to pulse the HDP signal to indicate
-		 * that the EDID information has changed. Some monitors do this
-		 * when they wakeup from standby or are enabled. When the HDP
-		 * goes low the adv7511 is reset and the outputs are disabled
-		 * which might cause the monitor to go to standby again. To
-		 * avoid this we ignore the HDP pin for the first few seconds
-		 * after enabling the output.
-		 */
-		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-				   ADV7511_REG_POWER2_HDP_SRC_MASK,
-				   ADV7511_REG_POWER2_HDP_SRC_NONE);
-		/* Most of the registers are reset during power down or
-		 * when HPD is low
-		 */
-		regcache_sync(adv7511->regmap);
-		break;
-	default:
-		/* TODO: setup additional power down modes */
-		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-				   ADV7511_POWER_POWER_DOWN,
-				   ADV7511_POWER_POWER_DOWN);
-		regcache_mark_dirty(adv7511->regmap);
-		break;
-	}
-
-	adv7511->dpms_mode = mode;
+	if (mode == DRM_MODE_DPMS_ON)
+		adv7511_power_on(adv7511);
+	else
+		adv7511_power_off(adv7511);
 }
 
 static enum drm_connector_status
@@ -620,10 +631,9 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
 	 * there is a pending HPD interrupt and the cable is connected there was
 	 * at least one transition from disconnected to connected and the chip
 	 * has to be reinitialized. */
-	if (status == connector_status_connected && hpd &&
-	    adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
+	if (status == connector_status_connected && hpd && adv7511->powered) {
 		regcache_mark_dirty(adv7511->regmap);
-		adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
+		adv7511_power_on(adv7511);
 		adv7511_get_modes(encoder, connector);
 		if (adv7511->status == connector_status_connected)
 			status = connector_status_disconnected;
@@ -858,7 +868,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	if (!adv7511)
 		return -ENOMEM;
 
-	adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
+	adv7511->powered = false;
 	adv7511->status = connector_status_disconnected;
 
 	ret = adv7511_parse_dt(dev->of_node, &link_config);
@@ -918,10 +928,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 	regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
 		     ADV7511_CEC_CTRL_POWER_DOWN);
 
-	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-			   ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
-
-	adv7511->current_edid_segment = -1;
+	adv7511_power_off(adv7511);
 
 	i2c_set_clientdata(i2c, adv7511);
 

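The adv7511_power_on()/adv7511_power_off() helpers this hunk switches to are introduced by an earlier part of the patch that is not shown in this excerpt. A minimal sketch of what they plausibly contain, reconstructed from the DPMS branches removed above (the exact bodies in the patch may differ):

static void adv7511_power_on(struct adv7511 *adv7511)
{
	adv7511->current_edid_segment = -1;

	/* Re-arm the interrupts needed for EDID reads, split per
	 * register as in the get_modes() fix above */
	regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
		     ADV7511_INT0_EDID_READY);
	regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
		     ADV7511_INT1_DDC_ERROR);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
			   ADV7511_POWER_POWER_DOWN, 0);

	/* Ignore the HDP pin right after power-up, per the rationale in
	 * the comment removed above */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
			   ADV7511_REG_POWER2_HDP_SRC_MASK,
			   ADV7511_REG_POWER2_HDP_SRC_NONE);

	/* Most registers are reset during power down or when HPD is low */
	regcache_sync(adv7511->regmap);

	adv7511->powered = true;
}

static void adv7511_power_off(struct adv7511 *adv7511)
{
	/* TODO: set up additional power down modes */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
			   ADV7511_POWER_POWER_DOWN,
			   ADV7511_POWER_POWER_DOWN);
	regcache_mark_dirty(adv7511->regmap);
	adv7511->powered = false;
}
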
+ 120 - 73
drivers/gpu/drm/i915/i915_debugfs.c

@@ -4541,12 +4541,116 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
+struct sseu_dev_status {
+	unsigned int slice_total;
+	unsigned int subslice_total;
+	unsigned int subslice_per_slice;
+	unsigned int eu_total;
+	unsigned int eu_per_subslice;
+};
+
+static void cherryview_sseu_device_status(struct drm_device *dev,
+					  struct sseu_dev_status *stat)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const int ss_max = 2;
+	int ss;
+	u32 sig1[ss_max], sig2[ss_max];
+
+	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+	for (ss = 0; ss < ss_max; ss++) {
+		unsigned int eu_cnt;
+
+		if (sig1[ss] & CHV_SS_PG_ENABLE)
+			/* skip disabled subslice */
+			continue;
+
+		stat->slice_total = 1;
+		stat->subslice_per_slice++;
+		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+		stat->eu_total += eu_cnt;
+		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+	}
+	stat->subslice_total = stat->subslice_per_slice;
+}
+
+static void gen9_sseu_device_status(struct drm_device *dev,
+				    struct sseu_dev_status *stat)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int s_max = 3, ss_max = 4;
+	int s, ss;
+	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+	/* BXT has a single slice and at most 3 subslices. */
+	if (IS_BROXTON(dev)) {
+		s_max = 1;
+		ss_max = 3;
+	}
+
+	for (s = 0; s < s_max; s++) {
+		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
+		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
+		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
+	}
+
+	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+		     GEN9_PGCTL_SSA_EU19_ACK |
+		     GEN9_PGCTL_SSA_EU210_ACK |
+		     GEN9_PGCTL_SSA_EU311_ACK;
+	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+		     GEN9_PGCTL_SSB_EU19_ACK |
+		     GEN9_PGCTL_SSB_EU210_ACK |
+		     GEN9_PGCTL_SSB_EU311_ACK;
+
+	for (s = 0; s < s_max; s++) {
+		unsigned int ss_cnt = 0;
+
+		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+			/* skip disabled slice */
+			continue;
+
+		stat->slice_total++;
+
+		if (IS_SKYLAKE(dev))
+			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+
+		for (ss = 0; ss < ss_max; ss++) {
+			unsigned int eu_cnt;
+
+			if (IS_BROXTON(dev) &&
+			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+				/* skip disabled subslice */
+				continue;
+
+			if (IS_BROXTON(dev))
+				ss_cnt++;
+
+			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+					       eu_mask[ss%2]);
+			stat->eu_total += eu_cnt;
+			stat->eu_per_subslice = max(stat->eu_per_subslice,
+						    eu_cnt);
+		}
+
+		stat->subslice_total += ss_cnt;
+		stat->subslice_per_slice = max(stat->subslice_per_slice,
+					       ss_cnt);
+	}
+}
+
 static int i915_sseu_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+	struct sseu_dev_status stat;
 
 	if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
 		return -ENODEV;
@@ -4570,79 +4674,22 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 		   yesno(INTEL_INFO(dev)->has_eu_pg));
 
 	seq_puts(m, "SSEU Device Status\n");
+	memset(&stat, 0, sizeof(stat));
 	if (IS_CHERRYVIEW(dev)) {
-		const int ss_max = 2;
-		int ss;
-		u32 sig1[ss_max], sig2[ss_max];
-
-		sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
-		sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
-		sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
-		sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
-
-		for (ss = 0; ss < ss_max; ss++) {
-			unsigned int eu_cnt;
-
-			if (sig1[ss] & CHV_SS_PG_ENABLE)
-				/* skip disabled subslice */
-				continue;
-
-			s_tot = 1;
-			ss_per++;
-			eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
-				 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
-				 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
-				 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-			eu_tot += eu_cnt;
-			eu_per = max(eu_per, eu_cnt);
-		}
-		ss_tot = ss_per;
-	} else if (IS_SKYLAKE(dev)) {
-		const int s_max = 3, ss_max = 4;
-		int s, ss;
-		u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
-
-		s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
-		s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
-		s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
-		eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
-		eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
-		eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
-		eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
-		eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
-		eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
-		eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
-			     GEN9_PGCTL_SSA_EU19_ACK |
-			     GEN9_PGCTL_SSA_EU210_ACK |
-			     GEN9_PGCTL_SSA_EU311_ACK;
-		eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
-			     GEN9_PGCTL_SSB_EU19_ACK |
-			     GEN9_PGCTL_SSB_EU210_ACK |
-			     GEN9_PGCTL_SSB_EU311_ACK;
-
-		for (s = 0; s < s_max; s++) {
-			if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
-				/* skip disabled slice */
-				continue;
-
-			s_tot++;
-			ss_per = INTEL_INFO(dev)->subslice_per_slice;
-			ss_tot += ss_per;
-			for (ss = 0; ss < ss_max; ss++) {
-				unsigned int eu_cnt;
-
-				eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
-						       eu_mask[ss%2]);
-				eu_tot += eu_cnt;
-				eu_per = max(eu_per, eu_cnt);
-			}
-		}
+		cherryview_sseu_device_status(dev, &stat);
+	} else if (INTEL_INFO(dev)->gen >= 9) {
+		gen9_sseu_device_status(dev, &stat);
 	}
-	seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
-	seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
-	seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
-	seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
-	seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);
+	seq_printf(m, "  Enabled Slice Total: %u\n",
+		   stat.slice_total);
+	seq_printf(m, "  Enabled Subslice Total: %u\n",
+		   stat.subslice_total);
+	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
+		   stat.subslice_per_slice);
+	seq_printf(m, "  Enabled EU Total: %u\n",
+		   stat.eu_total);
+	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
+		   stat.eu_per_subslice);
 
 	return 0;
 }

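Both status helpers count enabled EUs as 2 * hweight32(ack & mask): each ack bit in the PGCTL registers covers a pair of EUs, so the population count is doubled. A standalone sketch of that step with a made-up register value; hweight32() here is a portable stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Population count, equivalent to the kernel's hweight32() */
static unsigned int hweight32(uint32_t w)
{
	unsigned int count = 0;

	while (w) {
		w &= w - 1;	/* clear the lowest set bit */
		count++;
	}
	return count;
}

int main(void)
{
	/* SSA-style mask: one ack bit per EU pair (bits 0, 2, 4, 6) */
	uint32_t eu_mask = (1 << 0) | (1 << 2) | (1 << 4) | (1 << 6);
	/* Hypothetical readout: pairs EU0/8 and EU2/10 acked as powered */
	uint32_t eu_reg = (1 << 0) | (1 << 4);

	printf("enabled EUs: %u\n", 2 * hweight32(eu_reg & eu_mask));
	/* prints: enabled EUs: 4 */
	return 0;
}
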
+ 143 - 110
drivers/gpu/drm/i915/i915_dma.c

@@ -564,6 +564,140 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef SEP_COMMA
 }
 
+static void cherryview_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	u32 fuse, eu_dis;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	fuse = I915_READ(CHV_FUSE_GT);
+
+	info->slice_total = 1;
+
+	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+				 CHV_FGT_EU_DIS_SS0_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+				 CHV_FGT_EU_DIS_SS1_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	info->subslice_total = info->subslice_per_slice;
+	/*
+	 * CHV is expected to always have a uniform distribution of EUs
+	 * across subslices.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				info->eu_total / info->subslice_total :
+				0;
+	/*
+	 * CHV supports subslice power gating on devices with more than
+	 * one subslice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice.
+	 */
+	info->has_slice_pg = 0;
+	info->has_subslice_pg = (info->subslice_total > 1);
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	int s_max = 3, ss_max = 4, eu_max = 8;
+	int s, ss;
+	u32 fuse2, s_enable, ss_disable, eu_disable;
+	u8 eu_mask = 0xff;
+
+	/*
+	 * BXT has a single slice. BXT also has at most 6 EUs per subslice,
+	 * and therefore only the lowest 6 bits of the 8-bit EU disable
+	 * fields are valid.
+	 */
+	if (IS_BROXTON(dev)) {
+		s_max = 1;
+		eu_max = 6;
+		eu_mask = 0x3f;
+	}
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+		   GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+		     GEN9_F2_SS_DIS_SHIFT;
+
+	info->slice_total = hweight32(s_enable);
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total *
+			       info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total number of enabled EUs.
+	 */
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & (0x1 << s)))
+			/* skip disabled slice */
+			continue;
+
+		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+		for (ss = 0; ss < ss_max; ss++) {
+			int eu_per_ss;
+
+			if (ss_disable & (0x1 << ss))
+				/* skip disabled subslice */
+				continue;
+
+			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+						      eu_mask);
+
+			/*
+			 * Record which subslice(s) have 7 EUs. We
+			 * can tune the hash used to spread work among
+			 * subslices if they are unbalanced.
+			 */
+			if (eu_per_ss == 7)
+				info->subslice_7eu[s] |= 1 << ss;
+
+			info->eu_total += eu_per_ss;
+		}
+	}
+
+	/*
+	 * SKL is expected to always have a uniform distribution
+	 * of EUs across subslices, with the exception that any one
+	 * EU in any one subslice may be fused off for die
+	 * recovery. BXT is expected to be perfectly uniform in EU
+	 * distribution.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+				DIV_ROUND_UP(info->eu_total,
+					     info->subslice_total) : 0;
+	/*
+	 * SKL supports slice power gating on devices with more than
+	 * one slice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice. BXT supports subslice
+	 * power gating on devices with more than one subslice, and
+	 * supports EU power gating on devices with more than one EU
+	 * pair per subslice.
+	 */
+	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
 /*
  * Determine various intel_device_info fields at runtime.
  *
@@ -585,7 +719,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
-	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+	if (IS_BROXTON(dev)) {
+		info->num_sprites[PIPE_A] = 3;
+		info->num_sprites[PIPE_B] = 3;
+		info->num_sprites[PIPE_C] = 2;
+	} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
@@ -620,116 +758,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 	}
 
 	/* Initialize slice/subslice/EU info */
-	if (IS_CHERRYVIEW(dev)) {
-		u32 fuse, eu_dis;
-
-		fuse = I915_READ(CHV_FUSE_GT);
-
-		info->slice_total = 1;
-
-		if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-			info->subslice_per_slice++;
-			eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
-					 CHV_FGT_EU_DIS_SS0_R1_MASK);
-			info->eu_total += 8 - hweight32(eu_dis);
-		}
-
-		if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-			info->subslice_per_slice++;
-			eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
-					CHV_FGT_EU_DIS_SS1_R1_MASK);
-			info->eu_total += 8 - hweight32(eu_dis);
-		}
-
-		info->subslice_total = info->subslice_per_slice;
-		/*
-		 * CHV expected to always have a uniform distribution of EU
-		 * across subslices.
-		*/
-		info->eu_per_subslice = info->subslice_total ?
-					info->eu_total / info->subslice_total :
-					0;
-		/*
-		 * CHV supports subslice power gating on devices with more than
-		 * one subslice, and supports EU power gating on devices with
-		 * more than one EU pair per subslice.
-		*/
-		info->has_slice_pg = 0;
-		info->has_subslice_pg = (info->subslice_total > 1);
-		info->has_eu_pg = (info->eu_per_subslice > 2);
-	} else if (IS_SKYLAKE(dev)) {
-		const int s_max = 3, ss_max = 4, eu_max = 8;
-		int s, ss;
-		u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
-		fuse2 = I915_READ(GEN8_FUSE2);
-		s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
-			   GEN8_F2_S_ENA_SHIFT;
-		ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
-			     GEN9_F2_SS_DIS_SHIFT;
-
-		eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
-		eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
-		eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
-
-		info->slice_total = hweight32(s_enable);
-		/*
-		 * The subslice disable field is global, i.e. it applies
-		 * to each of the enabled slices.
-		*/
-		info->subslice_per_slice = ss_max - hweight32(ss_disable);
-		info->subslice_total = info->slice_total *
-				       info->subslice_per_slice;
-
-		/*
-		 * Iterate through enabled slices and subslices to
-		 * count the total enabled EU.
-		*/
-		for (s = 0; s < s_max; s++) {
-			if (!(s_enable & (0x1 << s)))
-				/* skip disabled slice */
-				continue;
-
-			for (ss = 0; ss < ss_max; ss++) {
-				u32 n_disabled;
-
-				if (ss_disable & (0x1 << ss))
-					/* skip disabled subslice */
-					continue;
+	if (IS_CHERRYVIEW(dev))
+		cherryview_sseu_info_init(dev);
+	else if (INTEL_INFO(dev)->gen >= 9)
+		gen9_sseu_info_init(dev);
 
-				n_disabled = hweight8(eu_disable[s] >>
-						      (ss * eu_max));
-
-				/*
-				 * Record which subslice(s) has(have) 7 EUs. we
-				 * can tune the hash used to spread work among
-				 * subslices if they are unbalanced.
-				 */
-				if (eu_max - n_disabled == 7)
-					info->subslice_7eu[s] |= 1 << ss;
-
-				info->eu_total += eu_max - n_disabled;
-			}
-		}
-
-		/*
-		 * SKL is expected to always have a uniform distribution
-		 * of EU across subslices with the exception that any one
-		 * EU in any one subslice may be fused off for die
-		 * recovery.
-		*/
-		info->eu_per_subslice = info->subslice_total ?
-					DIV_ROUND_UP(info->eu_total,
-						     info->subslice_total) : 0;
-		/*
-		 * SKL supports slice power gating on devices with more than
-		 * one slice, and supports EU power gating on devices with
-		 * more than one EU pair per subslice.
-		*/
-		info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
-		info->has_subslice_pg = 0;
-		info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
-	}
 	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
 	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
 	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);

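gen9_sseu_info_init() derives the whole topology from fuse fields with hweight and multiplication. The same arithmetic as a standalone sketch, using hypothetical field values instead of a real GEN8_FUSE2 readout:

#include <stdint.h>
#include <stdio.h>

/* Population count, equivalent to the kernel's hweight32() */
static unsigned int hweight32(uint32_t w)
{
	unsigned int count = 0;

	while (w) {
		w &= w - 1;
		count++;
	}
	return count;
}

int main(void)
{
	const int ss_max = 4;	/* SKL: up to 4 subslices per slice */
	/* Hypothetical fuses: slices 0 and 1 enabled, subslice 3 disabled */
	uint32_t s_enable = 0x3;
	uint32_t ss_disable = 0x8;

	unsigned int slice_total = hweight32(s_enable);
	/* The subslice disable field applies to every enabled slice */
	unsigned int subslice_per_slice = ss_max - hweight32(ss_disable);
	unsigned int subslice_total = slice_total * subslice_per_slice;

	printf("slices=%u subslices/slice=%u subslices=%u\n",
	       slice_total, subslice_per_slice, subslice_total);
	/* prints: slices=2 subslices/slice=3 subslices=6 */
	return 0;
}
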
+ 14 - 1
drivers/gpu/drm/i915/i915_drv.c

@@ -381,6 +381,18 @@ static const struct intel_device_info intel_skylake_gt3_info = {
 	IVB_CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_broxton_info = {
+	.is_preliminary = 1,
+	.gen = 9,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.num_pipes = 3,
+	.has_ddi = 1,
+	.has_fbc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general.  For example, since the Quanta match is based on the subsystem
@@ -420,7 +432,8 @@ static const struct intel_device_info intel_skylake_gt3_info = {
 	INTEL_CHV_IDS(&intel_cherryview_info),	\
 	INTEL_SKL_GT1_IDS(&intel_skylake_info),	\
 	INTEL_SKL_GT2_IDS(&intel_skylake_info),	\
-	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info)	\
+	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),	\
+	INTEL_BXT_IDS(&intel_broxton_info)
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_PCI_IDS,

+ 6 - 1
drivers/gpu/drm/i915/i915_drv.h

@@ -130,7 +130,7 @@ enum transcoder {
  *
  * This value doesn't count the cursor plane.
  */
-#define I915_MAX_PLANES	3
+#define I915_MAX_PLANES	4
 
 enum plane {
 	PLANE_A = 0,
@@ -2319,6 +2319,7 @@ struct drm_i915_cmd_table {
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
+#define IS_BROXTON(dev)	(!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2343,6 +2344,10 @@ struct drm_i915_cmd_table {
 #define SKL_REVID_D0		(0x3)
 #define SKL_REVID_E0		(0x4)
 
+#define BXT_REVID_A0		(0x0)
+#define BXT_REVID_B0		(0x3)
+#define BXT_REVID_C0		(0x6)
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks

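The BXT_REVID_* values follow the SKL_REVID_* pattern and are intended for comparison against INTEL_REVID(), as the Skylake workaround code already does. A purely illustrative stepping check; the helper name below is hypothetical:

	/* Apply some workaround only on pre-B0 Broxton steppings */
	if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
		apply_a0_workaround(dev);	/* hypothetical helper */
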
+ 13 - 3
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -1867,7 +1867,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		if (IS_CHERRYVIEW(dev))
+		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 			chv_setup_private_ppat(dev_priv);
 		else
 			bdw_setup_private_ppat(dev_priv);
@@ -2433,7 +2433,17 @@ static int ggtt_probe_common(struct drm_device *dev,
 	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
 		(pci_resource_len(dev->pdev, 0) / 2);
 
-	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+	/*
+	 * On BXT, writes larger than 64 bits to the GTT pagetable range will
+	 * be dropped. For WC mappings in general we have 64-byte burst writes
+	 * when the WC buffer is flushed, so we can't use WC here and have to
+	 * resort to an uncached mapping. The WC issue is easily caught by the
+	 * readback check when writing GTT PTE entries.
+	 */
+	if (IS_BROXTON(dev))
+		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+	else
+		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
 	if (!dev_priv->gtt.gsm) {
 		DRM_ERROR("Failed to map the gtt page table\n");
 		return -ENOMEM;
@@ -2555,7 +2565,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
 	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 		chv_setup_private_ppat(dev_priv);
 	else
 		bdw_setup_private_ppat(dev_priv);

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -209,7 +209,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
 
 	dev_priv->fbc.threshold = ret;
 
-	if (HAS_PCH_SPLIT(dev))
+	if (INTEL_INFO(dev_priv)->gen >= 5)
 		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
 	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);

+ 14 - 12
drivers/gpu/drm/i915/i915_reg.h

@@ -1148,6 +1148,7 @@ enum skl_disp_power_wells {
 /* control register for cpu gtt access */
 #define TILECTL				0x101000
 #define   TILECTL_SWZCTL			(1 << 0)
+#define   TILECTL_TLBPF			(1 << 1)
 #define   TILECTL_TLB_PREFETCH_DIS	(1 << 2)
 #define   TILECTL_BACKSNOOP_DIS		(1 << 3)
 
@@ -1552,9 +1553,7 @@ enum skl_disp_power_wells {
 #define   GEN9_F2_SS_DIS_SHIFT		20
 #define   GEN9_F2_SS_DIS_MASK		(0xf << GEN9_F2_SS_DIS_SHIFT)
 
-#define GEN8_EU_DISABLE0		0x9134
-#define GEN8_EU_DISABLE1		0x9138
-#define GEN8_EU_DISABLE2		0x913c
+#define GEN9_EU_DISABLE(slice)		(0x9134 + (slice)*0x4)
 
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
 #define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
@@ -5347,9 +5346,11 @@ enum skl_disp_power_wells {
 #define  GEN8_PIPE_VSYNC		(1 << 1)
 #define  GEN8_PIPE_VBLANK		(1 << 0)
 #define  GEN9_PIPE_CURSOR_FAULT		(1 << 11)
+#define  GEN9_PIPE_PLANE4_FAULT		(1 << 10)
 #define  GEN9_PIPE_PLANE3_FAULT		(1 << 9)
 #define  GEN9_PIPE_PLANE2_FAULT		(1 << 8)
 #define  GEN9_PIPE_PLANE1_FAULT		(1 << 7)
+#define  GEN9_PIPE_PLANE4_FLIP_DONE	(1 << 6)
 #define  GEN9_PIPE_PLANE3_FLIP_DONE	(1 << 5)
 #define  GEN9_PIPE_PLANE2_FLIP_DONE	(1 << 4)
 #define  GEN9_PIPE_PLANE1_FLIP_DONE	(1 << 3)
@@ -5360,6 +5361,7 @@ enum skl_disp_power_wells {
 	 GEN8_PIPE_PRIMARY_FAULT)
 #define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
 	(GEN9_PIPE_CURSOR_FAULT | \
+	 GEN9_PIPE_PLANE4_FAULT | \
 	 GEN9_PIPE_PLANE3_FAULT | \
 	 GEN9_PIPE_PLANE2_FAULT | \
 	 GEN9_PIPE_PLANE1_FAULT)
@@ -5477,6 +5479,10 @@ enum skl_disp_power_wells {
 #define  HDC_FORCE_NON_COHERENT			(1<<4)
 #define  HDC_BARRIER_PERFORMANCE_DISABLE	(1<<10)
 
+/* GEN9 chicken */
+#define SLICE_ECO_CHICKEN0			0x7308
+#define   PIXEL_MASK_CAMMING_DISABLE		(1 << 14)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
@@ -6236,6 +6242,7 @@ enum skl_disp_power_wells {
 #define GEN8_UCGCTL6				0x9430
 #define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE	(1<<24)
 #define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE	(1<<14)
+#define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
 
 #define GEN6_GFXPAUSE				0xA000
 #define GEN6_RPNSWREQ				0xA008
@@ -6403,17 +6410,12 @@ enum skl_disp_power_wells {
 #define CHV_POWER_SS1_SIG2		0xa72c
 #define   CHV_EU311_PG_ENABLE		(1<<1)
 
-#define GEN9_SLICE0_PGCTL_ACK		0x804c
-#define GEN9_SLICE1_PGCTL_ACK		0x8050
-#define GEN9_SLICE2_PGCTL_ACK		0x8054
+#define GEN9_SLICE_PGCTL_ACK(slice)	(0x804c + (slice)*0x4)
 #define   GEN9_PGCTL_SLICE_ACK		(1 << 0)
+#define   GEN9_PGCTL_SS_ACK(subslice)	(1 << (2 + (subslice)*2))
 
-#define GEN9_SLICE0_SS01_EU_PGCTL_ACK	0x805c
-#define GEN9_SLICE0_SS23_EU_PGCTL_ACK	0x8060
-#define GEN9_SLICE1_SS01_EU_PGCTL_ACK	0x8064
-#define GEN9_SLICE1_SS23_EU_PGCTL_ACK	0x8068
-#define GEN9_SLICE2_SS01_EU_PGCTL_ACK	0x806c
-#define GEN9_SLICE2_SS23_EU_PGCTL_ACK	0x8070
+#define GEN9_SS01_EU_PGCTL_ACK(slice)	(0x805c + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice)	(0x8060 + (slice)*0x8)
 #define   GEN9_PGCTL_SSA_EU08_ACK	(1 << 0)
 #define   GEN9_PGCTL_SSA_EU19_ACK	(1 << 2)
 #define   GEN9_PGCTL_SSA_EU210_ACK	(1 << 4)

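The parameterised macros encode the register strides directly: GEN9_EU_DISABLE() steps 0x4 per slice, while the EU PGCTL ack registers step 0x8 per slice so the SS01/SS23 pairs interleave. Expanding them by hand reproduces the per-slice #defines removed above:

GEN9_EU_DISABLE(0)        /* 0x9134 + 0*0x4 = 0x9134 (was GEN8_EU_DISABLE0) */
GEN9_EU_DISABLE(2)        /* 0x9134 + 2*0x4 = 0x913c (was GEN8_EU_DISABLE2) */
GEN9_SLICE_PGCTL_ACK(1)   /* 0x804c + 1*0x4 = 0x8050 (was GEN9_SLICE1_PGCTL_ACK) */
GEN9_SS01_EU_PGCTL_ACK(1) /* 0x805c + 1*0x8 = 0x8064 (was GEN9_SLICE1_SS01_EU_PGCTL_ACK) */
GEN9_SS23_EU_PGCTL_ACK(2) /* 0x8060 + 2*0x8 = 0x8070 (was GEN9_SLICE2_SS23_EU_PGCTL_ACK) */
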
+ 1 - 1
drivers/gpu/drm/i915/intel_ddi.c

@@ -322,7 +322,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 	uint32_t reg = DDI_BUF_CTL(port);
 	int i;
 
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < 16; i++) {
 		udelay(1);
 		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
 			return;

+ 10 - 1
drivers/gpu/drm/i915/intel_display.c

@@ -13505,7 +13505,16 @@ static void intel_setup_outputs(struct drm_device *dev)
 	if (intel_crt_present(dev))
 		intel_crt_init(dev);
 
-	if (HAS_DDI(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * FIXME: Broxton doesn't support port detection via the
+		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
+		 * detect the ports.
+		 */
+		intel_ddi_init(dev, PORT_A);
+		intel_ddi_init(dev, PORT_B);
+		intel_ddi_init(dev, PORT_C);
+	} else if (HAS_DDI(dev)) {
 		int found;
 
 		/*

+ 18 - 1
drivers/gpu/drm/i915/intel_lrc.c

@@ -1207,6 +1207,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 {
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	bool vf_flush_wa;
 	u32 flags = 0;
 	int ret;
 
@@ -1228,10 +1229,26 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 	}
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 6);
+	/*
+	 * On GEN9+, before VF_CACHE_INVALIDATE we need to emit a NULL pipe
+	 * control.
+	 */
+	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
+		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+
+	ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
 	if (ret)
 		return ret;
 
+	if (vf_flush_wa) {
+		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+	}
+
 	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf, flags);
 	intel_logical_ring_emit(ringbuf, scratch_addr);

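The ring-space request grows from 6 to 12 dwords exactly when the workaround fires because each GFX_OP_PIPE_CONTROL(6) packet occupies six dwords: the all-zero (NULL) pipe control plus the real flush need 2 * 6 = 12 dwords, hence the "vf_flush_wa ? 12 : 6" above.
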
+ 31 - 2
drivers/gpu/drm/i915/intel_pm.c

@@ -98,6 +98,26 @@ static void skl_init_clock_gating(struct drm_device *dev)
 			   GEN8_LQSC_RO_PERF_DIS);
 }
 
+static void bxt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen9_init_clock_gating(dev);
+
+	/*
+	 * FIXME:
+	 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
+	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
+	 */
+	/* WaDisableSDEUnitClockGating:bxt */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
+		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+
+	/* FIXME: apply on A0 only */
+	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+}
+
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2542,6 +2562,7 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
  */
 
 #define SKL_DDB_SIZE		896	/* in blocks */
+#define BXT_DDB_SIZE		512
 
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
@@ -2560,7 +2581,10 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 		return;
 	}
 
-	ddb_size = SKL_DDB_SIZE;
+	if (IS_BROXTON(dev))
+		ddb_size = BXT_DDB_SIZE;
+	else
+		ddb_size = SKL_DDB_SIZE;
 
 	ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
@@ -6570,7 +6594,12 @@ void intel_init_pm(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 9) {
 		skl_setup_wm_latency(dev);
 
-		dev_priv->display.init_clock_gating = skl_init_clock_gating;
+		if (IS_BROXTON(dev))
+			dev_priv->display.init_clock_gating =
+				bxt_init_clock_gating;
+		else if (IS_SKYLAKE(dev))
+			dev_priv->display.init_clock_gating =
+				skl_init_clock_gating;
 		dev_priv->display.update_wm = skl_update_wm;
 		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
 	} else if (HAS_PCH_SPLIT(dev)) {

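With the Broxton value in place, the 4-block bypass reservation applied just below the selection leaves 512 - 4 = 508 allocatable DDB blocks on BXT versus 896 - 4 = 892 on SKL; the per-pipe split of that total is done by the unchanged code that follows this hunk.
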
+ 19 - 2
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -969,6 +969,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
+	/*
+	 * FIXME: don't apply the following on BXT for stepping C. On BXT A0
+	 * the flag reads back as 0.
+	 */
+	/* WaDisableMaskBasedCammingInRCC:sklC,bxtA */
+	if (INTEL_REVID(dev) == SKL_REVID_C0 || IS_BROXTON(dev))
+		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+				  PIXEL_MASK_CAMMING_DISABLE);
+
 	return 0;
 }
 
@@ -1030,6 +1039,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	return skl_tune_iz_hashing(ring);
 }
 
+static int bxt_init_workarounds(struct intel_engine_cs *ring)
+{
+	gen9_init_workarounds(ring);
+
+	return 0;
+}
+
 int init_workarounds_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1047,8 +1063,9 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
 
 	if (IS_SKYLAKE(dev))
 		return skl_init_workarounds(ring);
-	else if (IS_GEN9(dev))
-		return gen9_init_workarounds(ring);
+
+	if (IS_BROXTON(dev))
+		return bxt_init_workarounds(ring);
 
 	return 0;
 }

+ 1 - 1
drivers/gpu/drm/i915/intel_sprite.c

@@ -1144,7 +1144,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 
 	plane = drm_plane_find(dev, set->plane_id);
-	if (!plane) {
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
 		ret = -ENOENT;
 		goto out_unlock;
 	}

+ 11 - 0
drivers/gpu/drm/msm/Kconfig

@@ -35,3 +35,14 @@ config DRM_MSM_REGISTER_LOGGING
 	  Compile in support for logging register reads/writes in a format
 	  that can be parsed by envytools demsm tool.  If enabled, register
 	  logging can be switched on via msm.reglog=y module param.
+
+config DRM_MSM_DSI
+	bool "Enable DSI support in MSM DRM driver"
+	depends on DRM_MSM
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	default y
+	help
+	  Choose this option if you need MIPI DSI connector support.
+

+ 5 - 0
drivers/gpu/drm/msm/Makefile

@@ -50,5 +50,10 @@ msm-y := \
 
 msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			dsi/dsi_host.o \
+			dsi/dsi_manager.o \
+			dsi/dsi_phy.o \
+			mdp/mdp5/mdp5_cmd_encoder.o
 
 obj-$(CONFIG_DRM_MSM)	+= msm.o

+ 212 - 0
drivers/gpu/drm/msm/dsi/dsi.c

@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
+{
+	if (!msm_dsi || !msm_dsi->panel)
+		return NULL;
+
+	return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
+		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
+}
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+	if (!msm_dsi)
+		return;
+
+	msm_dsi_manager_unregister(msm_dsi);
+	if (msm_dsi->host) {
+		msm_dsi_host_destroy(msm_dsi->host);
+		msm_dsi->host = NULL;
+	}
+
+	platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+	struct msm_dsi *msm_dsi = NULL;
+	int ret;
+
+	if (!pdev) {
+		/* pdev is NULL here, so dev_err() on it would crash */
+		pr_err("no dsi device\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+	if (!msm_dsi) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	DBG("dsi probed=%p", msm_dsi);
+
+	msm_dsi->pdev = pdev;
+	platform_set_drvdata(pdev, msm_dsi);
+
+	/* Init dsi host */
+	ret = msm_dsi_host_init(msm_dsi);
+	if (ret)
+		goto fail;
+
+	/* Register to dsi manager */
+	ret = msm_dsi_manager_register(msm_dsi);
+	if (ret)
+		goto fail;
+
+	return msm_dsi;
+
+fail:
+	if (msm_dsi)
+		dsi_destroy(msm_dsi);
+
+	return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dsi *msm_dsi;
+
+	DBG("");
+	msm_dsi = dsi_init(pdev);
+	if (IS_ERR(msm_dsi))
+		return PTR_ERR(msm_dsi);
+
+	priv->dsi[msm_dsi->id] = msm_dsi;
+
+	return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct drm_device *drm = dev_get_drvdata(master);
+	struct msm_drm_private *priv = drm->dev_private;
+	struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+	int id = msm_dsi->id;
+
+	if (priv->dsi[id]) {
+		dsi_destroy(msm_dsi);
+		priv->dsi[id] = NULL;
+	}
+}
+
+static const struct component_ops dsi_ops = {
+	.bind   = dsi_bind,
+	.unbind = dsi_unbind,
+};
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+	DBG("");
+	component_del(&pdev->dev, &dsi_ops);
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,mdss-dsi-ctrl" },
+	{}
+};
+
+static struct platform_driver dsi_driver = {
+	.probe = dsi_dev_probe,
+	.remove = dsi_dev_remove,
+	.driver = {
+		.name = "msm_dsi",
+		.of_match_table = dt_match,
+	},
+};
+
+void __init msm_dsi_register(void)
+{
+	DBG("");
+	platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+	DBG("");
+	platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	int ret, i;
+
+	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
+		!encoders[MSM_DSI_CMD_ENCODER_ID]))
+		return -EINVAL;
+
+	msm_dsi->dev = dev;
+
+	ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+	if (ret) {
+		dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+		goto fail;
+	}
+
+	msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+	if (IS_ERR(msm_dsi->bridge)) {
+		ret = PTR_ERR(msm_dsi->bridge);
+		dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+		msm_dsi->bridge = NULL;
+		goto fail;
+	}
+
+	msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+	if (IS_ERR(msm_dsi->connector)) {
+		ret = PTR_ERR(msm_dsi->connector);
+		dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+		msm_dsi->connector = NULL;
+		goto fail;
+	}
+
+	for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+		encoders[i]->bridge = msm_dsi->bridge;
+		msm_dsi->encoders[i] = encoders[i];
+	}
+
+	priv->bridges[priv->num_bridges++]       = msm_dsi->bridge;
+	priv->connectors[priv->num_connectors++] = msm_dsi->connector;
+
+	return 0;
+fail:
+	if (msm_dsi) {
+		/* bridge/connector are normally destroyed by drm: */
+		if (msm_dsi->bridge) {
+			msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+			msm_dsi->bridge = NULL;
+		}
+		if (msm_dsi->connector) {
+			msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+			msm_dsi->connector = NULL;
+		}
+	}
+
+	return ret;
+}
+

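dsi_dev_probe()/dsi_dev_remove() only register the DSI block with the component framework; construction is deferred to dsi_bind(), which runs once the master drm_device exists. For context, a minimal sketch of the matching a master driver typically does (the compare callback and helper below are illustrative, not taken from msm_drv.c):

static int compare_of(struct device *dev, void *data)
{
	/* Match a sub-device by its device tree node */
	return dev->of_node == data;
}

static void add_dsi_component(struct device *master_dev,
			      struct device_node *dsi_node,
			      struct component_match **match)
{
	/* Queue the DSI node; binding runs once all components exist */
	component_match_add(master_dev, match, compare_of, dsi_node);
}
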
+ 117 - 0
drivers/gpu/drm/msm/dsi/dsi.h

@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/platform_device.h>
+
+#include "drm_crtc.h"
+#include "drm_mipi_dsi.h"
+#include "drm_panel.h"
+
+#include "msm_drv.h"
+
+#define DSI_0	0
+#define DSI_1	1
+#define DSI_MAX	2
+
+#define DSI_CLOCK_MASTER	DSI_0
+#define DSI_CLOCK_SLAVE		DSI_1
+
+#define DSI_LEFT		DSI_0
+#define DSI_RIGHT		DSI_1
+
+/* According to the current drm framework sequence, take the DSI_1 encoder
+ * as the master encoder.
+ */
+#define DSI_ENCODER_MASTER	DSI_1
+#define DSI_ENCODER_SLAVE	DSI_0
+
+struct msm_dsi {
+	struct drm_device *dev;
+	struct platform_device *pdev;
+
+	struct drm_connector *connector;
+	struct drm_bridge *bridge;
+
+	struct mipi_dsi_host *host;
+	struct msm_dsi_phy *phy;
+	struct drm_panel *panel;
+	unsigned long panel_flags;
+	bool phy_enabled;
+
+	/* the encoders we are hooked to (outside of dsi block) */
+	struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM];
+
+	int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+int msm_dsi_manager_phy_enable(int id,
+		const unsigned long bit_rate, const unsigned long esc_rate,
+		u32 *clk_pre, u32 *clk_post);
+void msm_dsi_manager_phy_disable(int id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+
+/* msm dsi */
+struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
+
+/* dsi host */
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+					u32 iova, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+					struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+					unsigned long *panel_flags);
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+					struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+
+/* dsi phy */
+struct msm_dsi_phy;
+enum msm_dsi_phy_type {
+	MSM_DSI_PHY_UNKNOWN,
+	MSM_DSI_PHY_28NM,
+	MSM_DSI_PHY_MAX
+};
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+	const unsigned long bit_rate, const unsigned long esc_rate);
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+					u32 *clk_pre, u32 *clk_post);
+#endif /* __DSI_CONNECTOR_H__ */
+

+ 376 - 42
drivers/gpu/drm/msm/dsi/dsi.xml.h

@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  11712 bytes, from 2013-08-17 17:13:43)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (   8253 bytes, from 2014-12-08 16:13:00)
-
-Copyright (C) 2013 by the following authors:
+- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml             (  18681 bytes, from 2015-03-04 23:08:31)
+- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-01-28 21:43:22)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -51,11 +42,11 @@ enum dsi_traffic_mode {
 	BURST_MODE = 2,
 };
 
-enum dsi_dst_format {
-	DST_FORMAT_RGB565 = 0,
-	DST_FORMAT_RGB666 = 1,
-	DST_FORMAT_RGB666_LOOSE = 2,
-	DST_FORMAT_RGB888 = 3,
+enum dsi_vid_dst_format {
+	VID_DST_FORMAT_RGB565 = 0,
+	VID_DST_FORMAT_RGB666 = 1,
+	VID_DST_FORMAT_RGB666_LOOSE = 2,
+	VID_DST_FORMAT_RGB888 = 3,
 };
 
 enum dsi_rgb_swap {
@@ -69,20 +60,63 @@ enum dsi_rgb_swap {
 
 enum dsi_cmd_trigger {
 	TRIGGER_NONE = 0,
+	TRIGGER_SEOF = 1,
 	TRIGGER_TE = 2,
 	TRIGGER_SW = 4,
 	TRIGGER_SW_SEOF = 5,
 	TRIGGER_SW_TE = 6,
 };
 
+enum dsi_cmd_dst_format {
+	CMD_DST_FORMAT_RGB111 = 0,
+	CMD_DST_FORMAT_RGB332 = 3,
+	CMD_DST_FORMAT_RGB444 = 4,
+	CMD_DST_FORMAT_RGB565 = 6,
+	CMD_DST_FORMAT_RGB666 = 7,
+	CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+	LANE_SWAP_0123 = 0,
+	LANE_SWAP_3012 = 1,
+	LANE_SWAP_2301 = 2,
+	LANE_SWAP_1230 = 3,
+	LANE_SWAP_0321 = 4,
+	LANE_SWAP_1032 = 5,
+	LANE_SWAP_2103 = 6,
+	LANE_SWAP_3210 = 7,
+};
+
 #define DSI_IRQ_CMD_DMA_DONE					0x00000001
 #define DSI_IRQ_MASK_CMD_DMA_DONE				0x00000002
 #define DSI_IRQ_CMD_MDP_DONE					0x00000100
 #define DSI_IRQ_MASK_CMD_MDP_DONE				0x00000200
 #define DSI_IRQ_VIDEO_DONE					0x00010000
 #define DSI_IRQ_MASK_VIDEO_DONE					0x00020000
+#define DSI_IRQ_BTA_DONE					0x00100000
+#define DSI_IRQ_MASK_BTA_DONE					0x00200000
 #define DSI_IRQ_ERROR						0x01000000
 #define DSI_IRQ_MASK_ERROR					0x02000000
+#define REG_DSI_6G_HW_VERSION					0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK				0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT				28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK				0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT				16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK				0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT				0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+	return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
+
 #define REG_DSI_CTRL						0x00000000
 #define DSI_CTRL_ENABLE						0x00000001
 #define DSI_CTRL_VID_MODE_EN					0x00000002
@@ -96,11 +130,15 @@ enum dsi_cmd_trigger {
 #define DSI_CTRL_CRC_CHECK					0x01000000
 
 #define REG_DSI_STATUS0						0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY			0x00000001
 #define DSI_STATUS0_CMD_MODE_DMA_BUSY				0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY				0x00000004
 #define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY			0x00000008
 #define DSI_STATUS0_DSI_BUSY					0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION			0x80000000
 
 #define REG_DSI_FIFO_STATUS					0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW			0x00000080
 
 #define REG_DSI_VID_CFG0					0x0000000c
 #define DSI_VID_CFG0_VIRT_CHANNEL__MASK				0x00000003
@@ -111,7 +149,7 @@ static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
 }
 #define DSI_VID_CFG0_DST_FORMAT__MASK				0x00000030
 #define DSI_VID_CFG0_DST_FORMAT__SHIFT				4
-static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
 {
 	return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
 }
@@ -129,21 +167,15 @@ static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
 #define DSI_VID_CFG0_PULSE_MODE_HSA_HE				0x10000000
 
 #define REG_DSI_VID_CFG1					0x0000001c
-#define DSI_VID_CFG1_R_SEL					0x00000010
-#define DSI_VID_CFG1_G_SEL					0x00000100
-#define DSI_VID_CFG1_B_SEL					0x00001000
-#define DSI_VID_CFG1_RGB_SWAP__MASK				0x00070000
-#define DSI_VID_CFG1_RGB_SWAP__SHIFT				16
+#define DSI_VID_CFG1_R_SEL					0x00000001
+#define DSI_VID_CFG1_G_SEL					0x00000010
+#define DSI_VID_CFG1_B_SEL					0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK				0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT				12
 static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
 {
 	return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
 }
-#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK			0x00f00000
-#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT			20
-static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
-{
-	return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
-}
 
 #define REG_DSI_ACTIVE_H					0x00000020
 #define DSI_ACTIVE_H_START__MASK				0x00000fff
@@ -201,32 +233,115 @@ static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
 	return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
 }
 
-#define REG_DSI_ACTIVE_VSYNC					0x00000034
-#define DSI_ACTIVE_VSYNC_START__MASK				0x00000fff
-#define DSI_ACTIVE_VSYNC_START__SHIFT				0
-static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+#define REG_DSI_ACTIVE_VSYNC_HPOS				0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK			0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT			0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
 {
-	return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+	return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
 }
-#define DSI_ACTIVE_VSYNC_END__MASK				0x0fff0000
-#define DSI_ACTIVE_VSYNC_END__SHIFT				16
-static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK				0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT			16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
 {
-	return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+	return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS				0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK			0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT			0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK				0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT			16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+	return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
 }
 
 #define REG_DSI_CMD_DMA_CTRL					0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN				0x80000000
 #define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER			0x10000000
 #define DSI_CMD_DMA_CTRL_LOW_POWER				0x04000000
 
 #define REG_DSI_CMD_CFG0					0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK				0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT				0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+	return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL					0x00000010
+#define DSI_CMD_CFG0_G_SEL					0x00000100
+#define DSI_CMD_CFG0_B_SEL					0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK			0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT			20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK				0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT				16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+	return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
 
 #define REG_DSI_CMD_CFG1					0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK				0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT			0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK			0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT			8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+	return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND				0x00010000
 
 #define REG_DSI_DMA_BASE					0x00000044
 
 #define REG_DSI_DMA_LEN						0x00000048
 
+#define REG_DSI_CMD_MDP_STREAM_CTRL				0x00000054
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK			0x0000003f
+#define DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT		0
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK		0x00000300
+#define DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT		8
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK		0xffff0000
+#define DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT		16
+static inline uint32_t DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM_TOTAL				0x00000058
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK			0x00000fff
+#define DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT			0
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK			0x0fff0000
+#define DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT			16
+static inline uint32_t DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(uint32_t val)
+{
+	return ((val) << DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL__MASK;
+}
+
 #define REG_DSI_ACK_ERR_STATUS					0x00000064
 
 static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
@@ -234,19 +349,25 @@ static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
 static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
 
 #define REG_DSI_TRIG_CTRL					0x00000080
-#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK				0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK				0x00000007
 #define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT			0
 static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
 {
 	return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
 }
-#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK				0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK				0x00000070
 #define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT			4
 static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
 {
 	return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
 }
-#define DSI_TRIG_CTRL_STREAM					0x00000100
+#define DSI_TRIG_CTRL_STREAM__MASK				0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT				8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+	return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME			0x00001000
 #define DSI_TRIG_CTRL_TE					0x80000000
 
 #define REG_DSI_TRIG_DMA					0x0000008c
@@ -274,6 +395,12 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
 #define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE			0x00000010
 
 #define REG_DSI_LANE_SWAP_CTRL					0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK			0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT			0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+	return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
 
 #define REG_DSI_ERR_INT_MASK0					0x00000108
 
@@ -282,8 +409,36 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
 #define REG_DSI_RESET						0x00000114
 
 #define REG_DSI_CLK_CTRL					0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON				0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON				0x00000002
+#define DSI_CLK_CTRL_PCLK_ON					0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON					0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON					0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON					0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK			0x00000200
+
+#define REG_DSI_CLK_STATUS					0x0000011c
+#define DSI_CLK_STATUS_PLL_UNLOCKED				0x00010000
 
 #define REG_DSI_PHY_RESET					0x00000128
+#define DSI_PHY_RESET_RESET					0x00000001
+
+#define REG_DSI_RDBK_DATA_CTRL					0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK				0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT				16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+	return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR					0x00000001
+
+#define REG_DSI_VERSION						0x000001f0
+#define DSI_VERSION_MAJOR__MASK					0xff000000
+#define DSI_VERSION_MAJOR__SHIFT				24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
 
 #define REG_DSI_PHY_PLL_CTRL_0					0x00000200
 #define DSI_PHY_PLL_CTRL_0_ENABLE				0x00000001
@@ -501,5 +656,184 @@ static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x000003
 #define REG_DSI_8960_PHY_CAL_STATUS				0x00000550
 #define DSI_8960_PHY_CAL_STATUS_CAL_BUSY			0x00000010
 
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0				0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1				0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2				0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3				0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4				0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH			0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL				0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0				0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1				0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0				0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1				0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2				0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3				0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8			0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4				0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5				0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6				0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7				0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8				0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9				0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK			0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT			0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10				0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11				0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0					0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1					0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2					0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3					0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4					0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0				0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1				0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0				0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1				0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2				0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3				0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4				0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5				0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL				0x000001d4
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL				0x000001dc
+
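+/* Note: the REGULATOR_CTRL offsets below restart at 0x0; they appear to
+ * belong to a separate PHY-regulator register block with its own base
+ * address, rather than to the main PHY block above.
+ */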
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0			0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1			0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2			0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3			0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4			0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5			0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
+
 
 #endif /* DSI_XML */

+ 1993 - 0
drivers/gpu/drm/msm/dsi/dsi_host.c

@@ -0,0 +1,1993 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <video/mipi_display.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define MSM_DSI_VER_MAJOR_V2	0x02
+#define MSM_DSI_VER_MAJOR_6G	0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+
+#define DSI_6G_REG_SHIFT	4
+
+#define DSI_REGULATOR_MAX	8
+struct dsi_reg_entry {
+	char name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+};
+
+struct dsi_reg_config {
+	int num;
+	struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
+};
+
+struct dsi_config {
+	u32 major;
+	u32 minor;
+	u32 io_offset;
+	enum msm_dsi_phy_type phy_type;
+	struct dsi_reg_config reg_cfg;
+};
+
+static const struct dsi_config dsi_cfgs[] = {
+	{MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
+	{ /* 8974 v1 */
+		.major = MSM_DSI_VER_MAJOR_6G,
+		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
+		.io_offset = DSI_6G_REG_SHIFT,
+		.phy_type = MSM_DSI_PHY_28NM,
+		.reg_cfg = {
+			.num = 4,
+			.regs = {
+				{"gdsc", -1, -1, -1, -1},
+				{"vdd", 3000000, 3000000, 150000, 100},
+				{"vdda", 1200000, 1200000, 100000, 100},
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+	},
+	{ /* 8974 v2 */
+		.major = MSM_DSI_VER_MAJOR_6G,
+		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
+		.io_offset = DSI_6G_REG_SHIFT,
+		.phy_type = MSM_DSI_PHY_28NM,
+		.reg_cfg = {
+			.num = 4,
+			.regs = {
+				{"gdsc", -1, -1, -1, -1},
+				{"vdd", 3000000, 3000000, 150000, 100},
+				{"vdda", 1200000, 1200000, 100000, 100},
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+	},
+	{ /* 8974 v3 */
+		.major = MSM_DSI_VER_MAJOR_6G,
+		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
+		.io_offset = DSI_6G_REG_SHIFT,
+		.phy_type = MSM_DSI_PHY_28NM,
+		.reg_cfg = {
+			.num = 4,
+			.regs = {
+				{"gdsc", -1, -1, -1, -1},
+				{"vdd", 3000000, 3000000, 150000, 100},
+				{"vdda", 1200000, 1200000, 100000, 100},
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+	},
+	{ /* 8084 */
+		.major = MSM_DSI_VER_MAJOR_6G,
+		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
+		.io_offset = DSI_6G_REG_SHIFT,
+		.phy_type = MSM_DSI_PHY_28NM,
+		.reg_cfg = {
+			.num = 4,
+			.regs = {
+				{"gdsc", -1, -1, -1, -1},
+				{"vdd", 3000000, 3000000, 150000, 100},
+				{"vdda", 1200000, 1200000, 100000, 100},
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+	},
+	{ /* 8916 */
+		.major = MSM_DSI_VER_MAJOR_6G,
+		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
+		.io_offset = DSI_6G_REG_SHIFT,
+		.phy_type = MSM_DSI_PHY_28NM,
+		.reg_cfg = {
+			.num = 4,
+			.regs = {
+				{"gdsc", -1, -1, -1, -1},
+				{"vdd", 2850000, 2850000, 100000, 100},
+				{"vdda", 1200000, 1200000, 100000, 100},
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+	},
+};
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+	u32 ver;
+	u32 ver_6g;
+
+	if (!major || !minor)
+		return -EINVAL;
+
+	/* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
+	 * makes all other registers 4-byte shifted down.
+	 */
+	ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
+	if (ver_6g == 0) {
+		ver = msm_readl(base + REG_DSI_VERSION);
+		ver = FIELD(ver, DSI_VERSION_MAJOR);
+		if (ver <= MSM_DSI_VER_MAJOR_V2) {
+			/* old versions */
+			*major = ver;
+			*minor = 0;
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+		ver = FIELD(ver, DSI_VERSION_MAJOR);
+		if (ver == MSM_DSI_VER_MAJOR_6G) {
+			/* 6G version */
+			*major = ver;
+			*minor = ver_6g;
+			return 0;
+		} else {
+			return -EINVAL;
+		}
+	}
+}
+
+#define DSI_ERR_STATE_ACK			0x0000
+#define DSI_ERR_STATE_TIMEOUT			0x0001
+#define DSI_ERR_STATE_DLN0_PHY			0x0002
+#define DSI_ERR_STATE_FIFO			0x0004
+#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
+#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
+#define DSI_ERR_STATE_PLL_UNLOCKED		0x0020
+
+#define DSI_CLK_CTRL_ENABLE_CLKS	\
+		(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
+		DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
+		DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
+		DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
+
+struct msm_dsi_host {
+	struct mipi_dsi_host base;
+
+	struct platform_device *pdev;
+	struct drm_device *dev;
+
+	int id;
+
+	void __iomem *ctrl_base;
+	struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
+	struct clk *mdp_core_clk;
+	struct clk *ahb_clk;
+	struct clk *axi_clk;
+	struct clk *mmss_misc_ahb_clk;
+	struct clk *byte_clk;
+	struct clk *esc_clk;
+	struct clk *pixel_clk;
+	u32 byte_clk_rate;
+
+	struct gpio_desc *disp_en_gpio;
+	struct gpio_desc *te_gpio;
+
+	const struct dsi_config *cfg;
+
+	struct completion dma_comp;
+	struct completion video_comp;
+	struct mutex dev_mutex;
+	struct mutex cmd_mutex;
+	struct mutex clk_mutex;
+	spinlock_t intr_lock; /* Protect interrupt ctrl register */
+
+	u32 err_work_state;
+	struct work_struct err_work;
+	struct workqueue_struct *workqueue;
+
+	struct drm_gem_object *tx_gem_obj;
+	u8 *rx_buf;
+
+	struct drm_display_mode *mode;
+
+	/* Panel info */
+	struct device_node *panel_node;
+	unsigned int channel;
+	unsigned int lanes;
+	enum mipi_dsi_pixel_format format;
+	unsigned long mode_flags;
+
+	u32 dma_cmd_ctrl_restore;
+
+	bool registered;
+	bool power_on;
+	int irq;
+};
+
+static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
+{
+	switch (fmt) {
+	case MIPI_DSI_FMT_RGB565:		return 16;
+	case MIPI_DSI_FMT_RGB666_PACKED:	return 18;
+	case MIPI_DSI_FMT_RGB666:
+	case MIPI_DSI_FMT_RGB888:
+	default:				return 24;
+	}
+}
+
+static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
+{
+	return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
+{
+	msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
+
+static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+{
+	const struct dsi_config *cfg;
+	struct regulator *gdsc_reg;
+	int i, ret;
+	u32 major = 0, minor = 0;
+
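+	/* Bring up the GDSC supply and AHB clock just long enough to read
+	 * the version registers; they are torn down again below.
+	 */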
+	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
+	if (IS_ERR_OR_NULL(gdsc_reg)) {
+		pr_err("%s: cannot get gdsc\n", __func__);
+		goto fail;
+	}
+	ret = regulator_enable(gdsc_reg);
+	if (ret) {
+		pr_err("%s: unable to enable gdsc\n", __func__);
+		regulator_put(gdsc_reg);
+		goto fail;
+	}
+	ret = clk_prepare_enable(msm_host->ahb_clk);
+	if (ret) {
+		pr_err("%s: unable to enable ahb_clk\n", __func__);
+		regulator_disable(gdsc_reg);
+		regulator_put(gdsc_reg);
+		goto fail;
+	}
+
+	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
+
+	clk_disable_unprepare(msm_host->ahb_clk);
+	regulator_disable(gdsc_reg);
+	regulator_put(gdsc_reg);
+	if (ret) {
+		pr_err("%s: Invalid version\n", __func__);
+		goto fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
+		cfg = dsi_cfgs + i;
+		if ((cfg->major == major) && (cfg->minor == minor))
+			return cfg;
+	}
+	pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+
+fail:
+	return NULL;
+}
+
+static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct msm_dsi_host, base);
+}
+
+static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
+{
+	struct regulator_bulk_data *s = msm_host->supplies;
+	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+	int num = msm_host->cfg->reg_cfg.num;
+	int i;
+
+	DBG("");
+	for (i = num - 1; i >= 0; i--)
+		if (regs[i].disable_load >= 0)
+			regulator_set_optimum_mode(s[i].consumer,
+						regs[i].disable_load);
+
+	regulator_bulk_disable(num, s);
+}
+
+static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
+{
+	struct regulator_bulk_data *s = msm_host->supplies;
+	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+	int num = msm_host->cfg->reg_cfg.num;
+	int ret, i;
+
+	DBG("");
+	for (i = 0; i < num; i++) {
+		if (regs[i].enable_load >= 0) {
+			ret = regulator_set_optimum_mode(s[i].consumer,
+							regs[i].enable_load);
+			if (ret < 0) {
+				pr_err("regulator %d set op mode failed, %d\n",
+					i, ret);
+				goto fail;
+			}
+		}
+	}
+
+	ret = regulator_bulk_enable(num, s);
+	if (ret < 0) {
+		pr_err("regulator enable failed, %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (i--; i >= 0; i--)
+		regulator_set_optimum_mode(s[i].consumer, regs[i].disable_load);
+	return ret;
+}
+
+static int dsi_regulator_init(struct msm_dsi_host *msm_host)
+{
+	struct regulator_bulk_data *s = msm_host->supplies;
+	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
+	int num = msm_host->cfg->reg_cfg.num;
+	int i, ret;
+
+	for (i = 0; i < num; i++)
+		s[i].supply = regs[i].name;
+
+	ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
+	if (ret < 0) {
+		pr_err("%s: failed to init regulator, ret=%d\n",
+						__func__, ret);
+		return ret;
+	}
+
+	for (i = 0; i < num; i++) {
+		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+			ret = regulator_set_voltage(s[i].consumer,
+				regs[i].min_voltage, regs[i].max_voltage);
+			if (ret < 0) {
+				pr_err("regulator %d set voltage failed, %d\n",
+					i, ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int dsi_clk_init(struct msm_dsi_host *msm_host)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	int ret = 0;
+
+	msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+	if (IS_ERR(msm_host->mdp_core_clk)) {
+		ret = PTR_ERR(msm_host->mdp_core_clk);
+		pr_err("%s: Unable to get mdp core clk. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	msm_host->ahb_clk = devm_clk_get(dev, "iface_clk");
+	if (IS_ERR(msm_host->ahb_clk)) {
+		ret = PTR_ERR(msm_host->ahb_clk);
+		pr_err("%s: Unable to get mdss ahb clk. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	msm_host->axi_clk = devm_clk_get(dev, "bus_clk");
+	if (IS_ERR(msm_host->axi_clk)) {
+		ret = PTR_ERR(msm_host->axi_clk);
+		pr_err("%s: Unable to get axi bus clk. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk");
+	if (IS_ERR(msm_host->mmss_misc_ahb_clk)) {
+		ret = PTR_ERR(msm_host->mmss_misc_ahb_clk);
+		pr_err("%s: Unable to get mmss misc ahb clk. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
+	if (IS_ERR(msm_host->byte_clk)) {
+		ret = PTR_ERR(msm_host->byte_clk);
+		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
+			__func__, ret);
+		msm_host->byte_clk = NULL;
+		goto exit;
+	}
+
+	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
+	if (IS_ERR(msm_host->pixel_clk)) {
+		ret = PTR_ERR(msm_host->pixel_clk);
+		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
+			__func__, ret);
+		msm_host->pixel_clk = NULL;
+		goto exit;
+	}
+
+	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(msm_host->esc_clk)) {
+		ret = PTR_ERR(msm_host->esc_clk);
+		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
+			__func__, ret);
+		msm_host->esc_clk = NULL;
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
+{
+	int ret;
+
+	DBG("id=%d", msm_host->id);
+
+	ret = clk_prepare_enable(msm_host->mdp_core_clk);
+	if (ret) {
+		pr_err("%s: failed to enable mdp_core_clock, %d\n",
+							 __func__, ret);
+		goto core_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->ahb_clk);
+	if (ret) {
+		pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+		goto ahb_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->axi_clk);
+	if (ret) {
+		pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret);
+		goto axi_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk);
+	if (ret) {
+		pr_err("%s: failed to enable mmss misc ahb clk, %d\n",
+			__func__, ret);
+		goto misc_ahb_clk_err;
+	}
+
+	return 0;
+
+misc_ahb_clk_err:
+	clk_disable_unprepare(msm_host->axi_clk);
+axi_clk_err:
+	clk_disable_unprepare(msm_host->ahb_clk);
+ahb_clk_err:
+	clk_disable_unprepare(msm_host->mdp_core_clk);
+core_clk_err:
+	return ret;
+}
+
+static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
+{
+	DBG("");
+	clk_disable_unprepare(msm_host->mmss_misc_ahb_clk);
+	clk_disable_unprepare(msm_host->axi_clk);
+	clk_disable_unprepare(msm_host->ahb_clk);
+	clk_disable_unprepare(msm_host->mdp_core_clk);
+}
+
+static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+{
+	int ret;
+
+	DBG("Set clk rates: pclk=%d, byteclk=%d",
+		msm_host->mode->clock, msm_host->byte_clk_rate);
+
+	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+	if (ret) {
+		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+	if (ret) {
+		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+		goto error;
+	}
+
+	ret = clk_prepare_enable(msm_host->esc_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+		goto error;
+	}
+
+	ret = clk_prepare_enable(msm_host->byte_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+		goto byte_clk_err;
+	}
+
+	ret = clk_prepare_enable(msm_host->pixel_clk);
+	if (ret) {
+		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+		goto pixel_clk_err;
+	}
+
+	return 0;
+
+pixel_clk_err:
+	clk_disable_unprepare(msm_host->byte_clk);
+byte_clk_err:
+	clk_disable_unprepare(msm_host->esc_clk);
+error:
+	return ret;
+}
+
+static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+{
+	clk_disable_unprepare(msm_host->esc_clk);
+	clk_disable_unprepare(msm_host->pixel_clk);
+	clk_disable_unprepare(msm_host->byte_clk);
+}
+
+static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
+{
+	int ret = 0;
+
+	mutex_lock(&msm_host->clk_mutex);
+	if (enable) {
+		ret = dsi_bus_clk_enable(msm_host);
+		if (ret) {
+			pr_err("%s: Can not enable bus clk, %d\n",
+				__func__, ret);
+			goto unlock_ret;
+		}
+		ret = dsi_link_clk_enable(msm_host);
+		if (ret) {
+			pr_err("%s: Can not enable link clk, %d\n",
+				__func__, ret);
+			dsi_bus_clk_disable(msm_host);
+			goto unlock_ret;
+		}
+	} else {
+		dsi_link_clk_disable(msm_host);
+		dsi_bus_clk_disable(msm_host);
+	}
+
+unlock_ret:
+	mutex_unlock(&msm_host->clk_mutex);
+	return ret;
+}
+
+static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+{
+	struct drm_display_mode *mode = msm_host->mode;
+	u8 lanes = msm_host->lanes;
+	u32 bpp = dsi_get_bpp(msm_host->format);
+	u32 pclk_rate;
+
+	if (!mode) {
+		pr_err("%s: mode not set\n", __func__);
+		return -EINVAL;
+	}
+
+	pclk_rate = mode->clock * 1000;
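+	/* Byte clock = (pixel clock * bpp) / (8 * lanes). For example, a
+	 * 148500 kHz pixel clock at 24 bpp over 4 lanes gives a byte clock
+	 * of 111375000 Hz.
+	 */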
+	if (lanes > 0) {
+		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
+	} else {
+		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+	}
+
+	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+
+	return 0;
+}
+
+static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
+{
+	DBG("");
+	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+	/* Make sure fully reset */
+	wmb();
+	udelay(1000);
+	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+	udelay(100);
+}
+
+static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
+{
+	u32 intr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_host->intr_lock, flags);
+	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+
+	if (enable)
+		intr |= mask;
+	else
+		intr &= ~mask;
+
+	DBG("intr=%x enable=%d", intr, enable);
+
+	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
+	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+}
+
+static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
+{
+	if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+		return BURST_MODE;
+	else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+		return NON_BURST_SYNCH_PULSE;
+
+	return NON_BURST_SYNCH_EVENT;
+}
+
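+/* Note: MIPI_DSI_FMT_RGB666 is the loosely packed 18bpp variant (one
+ * pixel per 24-bit word), hence VID_DST_FORMAT_RGB666_LOOSE below, while
+ * MIPI_DSI_FMT_RGB666_PACKED maps to the tightly packed format.
+ */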
+static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
+				const enum mipi_dsi_pixel_format mipi_fmt)
+{
+	switch (mipi_fmt) {
+	case MIPI_DSI_FMT_RGB888:	return VID_DST_FORMAT_RGB888;
+	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666_LOOSE;
+	case MIPI_DSI_FMT_RGB666_PACKED:	return VID_DST_FORMAT_RGB666;
+	case MIPI_DSI_FMT_RGB565:	return VID_DST_FORMAT_RGB565;
+	default:			return VID_DST_FORMAT_RGB888;
+	}
+}
+
+static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+				const enum mipi_dsi_pixel_format mipi_fmt)
+{
+	switch (mipi_fmt) {
+	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+	case MIPI_DSI_FMT_RGB666:	return CMD_DST_FORMAT_RGB666;
+	case MIPI_DSI_FMT_RGB565:	return CMD_DST_FORMAT_RGB565;
+	default:			return CMD_DST_FORMAT_RGB888;
+	}
+}
+
+static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+				u32 clk_pre, u32 clk_post)
+{
+	u32 flags = msm_host->mode_flags;
+	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+	u32 data = 0;
+
+	if (!enable) {
+		dsi_write(msm_host, REG_DSI_CTRL, 0);
+		return;
+	}
+
+	if (flags & MIPI_DSI_MODE_VIDEO) {
+		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
+			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
+		if (flags & MIPI_DSI_MODE_VIDEO_HFP)
+			data |= DSI_VID_CFG0_HFP_POWER_STOP;
+		if (flags & MIPI_DSI_MODE_VIDEO_HBP)
+			data |= DSI_VID_CFG0_HBP_POWER_STOP;
+		if (flags & MIPI_DSI_MODE_VIDEO_HSA)
+			data |= DSI_VID_CFG0_HSA_POWER_STOP;
+		/* Always set low power stop mode for BLLP
+		 * to let command engine send packets
+		 */
+		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
+			DSI_VID_CFG0_BLLP_POWER_STOP;
+		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
+		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
+		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
+		dsi_write(msm_host, REG_DSI_VID_CFG0, data);
+
+		/* Do not swap RGB colors */
+		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
+		dsi_write(msm_host, REG_DSI_VID_CFG1, data);
+	} else {
+		/* Do not swap RGB colors */
+		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
+		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
+		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
+
+		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
+			DSI_CMD_CFG1_WR_MEM_CONTINUE(
+					MIPI_DCS_WRITE_MEMORY_CONTINUE);
+		/* Always insert DCS command */
+		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
+		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+	}
+
+	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
+			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
+			DSI_CMD_DMA_CTRL_LOW_POWER);
+
+	data = 0;
+	/* Always assume dedicated TE pin */
+	data |= DSI_TRIG_CTRL_TE;
+	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
+	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
+	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
+	if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+		(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
+	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
+
+	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
+		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
+	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+
+	data = 0;
+	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
+		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
+	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
+
+	/* allow only ack-err-status to generate interrupt */
+	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+
+	data = DSI_CTRL_CLK_EN;
+
+	DBG("lane number=%d", msm_host->lanes);
+	if (msm_host->lanes == 2) {
+		data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
+		/* swap lanes for 2-lane panel for better performance */
+		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
+	} else {
+		/* Take 4 lanes as default */
+		data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
+			DSI_CTRL_LANE3;
+		/* Do not swap lanes for 4-lane panel */
+		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
+	}
+	data |= DSI_CTRL_ENABLE;
+
+	dsi_write(msm_host, REG_DSI_CTRL, data);
+}
+
+static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+{
+	struct drm_display_mode *mode = msm_host->mode;
+	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
+	u32 h_total = mode->htotal;
+	u32 v_total = mode->vtotal;
+	u32 hs_end = mode->hsync_end - mode->hsync_start;
+	u32 vs_end = mode->vsync_end - mode->vsync_start;
+	u32 ha_start = h_total - mode->hsync_start;
+	u32 ha_end = ha_start + mode->hdisplay;
+	u32 va_start = v_total - mode->vsync_start;
+	u32 va_end = va_start + mode->vdisplay;
+	u32 wc;
+
+	DBG("");
+
+	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		dsi_write(msm_host, REG_DSI_ACTIVE_H,
+			DSI_ACTIVE_H_START(ha_start) |
+			DSI_ACTIVE_H_END(ha_end));
+		dsi_write(msm_host, REG_DSI_ACTIVE_V,
+			DSI_ACTIVE_V_START(va_start) |
+			DSI_ACTIVE_V_END(va_end));
+		dsi_write(msm_host, REG_DSI_TOTAL,
+			DSI_TOTAL_H_TOTAL(h_total - 1) |
+			DSI_TOTAL_V_TOTAL(v_total - 1));
+
+		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+			DSI_ACTIVE_HSYNC_START(hs_start) |
+			DSI_ACTIVE_HSYNC_END(hs_end));
+		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+	} else {		/* command mode */
+		/* image data and 1 byte write_memory_start cmd */
+		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+
+		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
+			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
+			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
+					msm_host->channel) |
+			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
+					MIPI_DSI_DCS_LONG_WRITE));
+
+		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
+			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
+	}
+}
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+	wmb(); /* clocks need to be enabled before reset */
+
+	dsi_write(msm_host, REG_DSI_RESET, 1);
+	wmb(); /* make sure reset happen */
+	dsi_write(msm_host, REG_DSI_RESET, 0);
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+					bool video_mode, bool enable)
+{
+	u32 dsi_ctrl;
+
+	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+	if (!enable) {
+		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+				DSI_CTRL_CMD_MODE_EN);
+		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+					DSI_IRQ_MASK_VIDEO_DONE, 0);
+	} else {
+		if (video_mode) {
+			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+		} else {		/* command mode */
+			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+		}
+		dsi_ctrl |= DSI_CTRL_ENABLE;
+	}
+
+	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+	u32 data;
+
+	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+	if (mode == 0)
+		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+	else
+		data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+	reinit_completion(&msm_host->video_comp);
+
+	wait_for_completion_timeout(&msm_host->video_comp,
+			msecs_to_jiffies(70));
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+		return;
+
+	if (msm_host->power_on) {
+		dsi_wait4video_done(msm_host);
+		/* delay up to 4 ms to skip BLLP */
+		usleep_range(2000, 4000);
+	}
+}
+
+/* dsi_cmd */
+static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+{
+	struct drm_device *dev = msm_host->dev;
+	int ret;
+	u32 iova;
+
+	mutex_lock(&dev->struct_mutex);
+	msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
+	if (IS_ERR(msm_host->tx_gem_obj)) {
+		ret = PTR_ERR(msm_host->tx_gem_obj);
+		pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
+		msm_host->tx_gem_obj = NULL;
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+	if (ret) {
+		pr_err("%s: failed to get iova, %d\n", __func__, ret);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	if (iova & 0x07) {
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
+{
+	struct drm_device *dev = msm_host->dev;
+
+	if (msm_host->tx_gem_obj) {
+		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
+		mutex_lock(&dev->struct_mutex);
+		msm_gem_free_object(msm_host->tx_gem_obj);
+		msm_host->tx_gem_obj = NULL;
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
+			const struct mipi_dsi_msg *msg)
+{
+	struct mipi_dsi_packet packet;
+	int len;
+	int ret;
+	u8 *data;
+
+	ret = mipi_dsi_create_packet(&packet, msg);
+	if (ret) {
+		pr_err("%s: create packet failed, %d\n", __func__, ret);
+		return ret;
+	}
+	len = (packet.size + 3) & (~0x3);
+
+	if (len > tx_gem->size) {
+		pr_err("%s: packet size is too big\n", __func__);
+		return -EINVAL;
+	}
+
+	data = msm_gem_vaddr(tx_gem);
+
+	if (IS_ERR(data)) {
+		ret = PTR_ERR(data);
+		pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* MSM specific command format in memory */
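+	/* Bytes 0-1 carry DSI packet header bytes 1-2 (word count for long
+	 * packets, parameters for short ones), byte 2 the data type, and
+	 * byte 3 the flags: bit 7 = last packet, bit 6 = long packet,
+	 * bit 5 = response expected.
+	 */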
+	data[0] = packet.header[1];
+	data[1] = packet.header[2];
+	data[2] = packet.header[0];
+	data[3] = BIT(7); /* Last packet */
+	if (mipi_dsi_packet_format_is_long(msg->type))
+		data[3] |= BIT(6);
+	if (msg->rx_buf && msg->rx_len)
+		data[3] |= BIT(5);
+
+	/* Long packet */
+	if (packet.payload && packet.payload_length)
+		memcpy(data + 4, packet.payload, packet.payload_length);
+
+	/* Append 0xff to the end */
+	if (packet.size < len)
+		memset(data + packet.size, 0xff, len - packet.size);
+
+	return len;
+}
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+	u8 *data = msg->rx_buf;
+	if (data && (msg->rx_len >= 1)) {
+		*data = buf[1]; /* strip out dcs type */
+		return 1;
+	} else {
+		pr_err("%s: read data does not match with rx_buf len %d\n",
+			__func__, msg->rx_len);
+		return -EINVAL;
+	}
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameter
+ */
+static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+	u8 *data = msg->rx_buf;
+	if (data && (msg->rx_len >= 2)) {
+		data[0] = buf[1]; /* strip out dcs type */
+		data[1] = buf[2];
+		return 2;
+	} else {
+		pr_err("%s: read data does not match with rx_buf len %d\n",
+			__func__, msg->rx_len);
+		return -EINVAL;
+	}
+}
+
+static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+	/* strip out 4 byte dcs header */
+	if (msg->rx_buf && msg->rx_len)
+		memcpy(msg->rx_buf, buf + 4, msg->rx_len);
+
+	return msg->rx_len;
+}
+
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+	int ret;
+	u32 iova;
+	bool triggered;
+
+	ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
+	if (ret) {
+		pr_err("%s: failed to get iova: %d\n", __func__, ret);
+		return ret;
+	}
+
+	reinit_completion(&msm_host->dma_comp);
+
+	dsi_wait4video_eng_busy(msm_host);
+
+	triggered = msm_dsi_manager_cmd_xfer_trigger(
+						msm_host->id, iova, len);
+	if (triggered) {
+		ret = wait_for_completion_timeout(&msm_host->dma_comp,
+					msecs_to_jiffies(200));
+		DBG("ret=%d", ret);
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		else
+			ret = len;
+	} else {
+		ret = len;
+	}
+
+	return ret;
+}
+
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+			u8 *buf, int rx_byte, int pkt_size)
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, cnt;
+	bool ack_error = false;
+	u32 read_cnt;
+	u8 reg[16];
+	int repeated_bytes = 0;
+	int buf_offset = buf - msm_host->rx_buf;
+
+	lp = (u32 *)buf;
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	/* Calculate real read data count */
+	read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
+
+	ack_error = (rx_byte == 4) ?
+		(read_cnt == 8) : /* short pkt + 4-byte error pkt */
+		(read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
+
+	if (ack_error)
+		read_cnt -= 4; /* Remove 4 byte error pkt */
+
+	/*
+	 * In case of multiple reads from the panel, after the first read there
+	 * is a possibility that some bytes of the payload repeat in the
+	 * RDBK_DATA registers, since every pass reads all the parameters from
+	 * the panel starting at the first byte. We need to skip the repeated
+	 * bytes and then append the new parameters to the rx buffer.
+	 */
+	if (read_cnt > 16) {
+		int bytes_shifted;
+		/* Any data more than 16 bytes will be shifted out.
+		 * The temp read buffer should already contain these bytes.
+		 * The remaining bytes in read buffer are the repeated bytes.
+		 */
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
+	for (i = cnt - 1; i >= 0; i--) {
+		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+		*temp++ = ntohl(data); /* to host byte order */
+		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		buf[j++] = reg[i];
+
+	return j;
+}
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+				const struct mipi_dsi_msg *msg)
+{
+	int len, ret;
+	int bllp_len = msm_host->mode->hdisplay *
+			dsi_get_bpp(msm_host->format) / 8;
+
+	len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
+	if (len <= 0) {
+		pr_err("%s: failed to add cmd type = 0x%x\n",
+			__func__, msg->type);
+		return len ? len : -EINVAL;
+	}
+
+	/* For video mode, do not send cmds longer than
+	 * one pixel line, since they are only transmitted
+	 * during the BLLP.
+	 */
+	/* TODO: if the command is sent in LP mode, the bit rate is only
+	 * half of esc clk rate. In this case, if the video is already
+	 * actively streaming, we need to check more carefully if the
+	 * command can be fit into one BLLP.
+	 */
+	if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
+		pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
+			__func__, len);
+		return -EINVAL;
+	}
+
+	ret = dsi_cmd_dma_tx(msm_host, len);
+	if (ret < len) {
+		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
+			__func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
+		return -ECOMM;
+	}
+
+	return len;
+}
+
+static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
+{
+	u32 data0, data1;
+
+	data0 = dsi_read(msm_host, REG_DSI_CTRL);
+	data1 = data0;
+	data1 &= ~DSI_CTRL_ENABLE;
+	dsi_write(msm_host, REG_DSI_CTRL, data1);
+	/*
+	 * The dsi controller needs to be disabled before the
+	 * clocks are turned on.
+	 */
+	wmb();
+
+	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+	wmb();	/* make sure clocks enabled */
+
+	/* dsi controller can only be reset while clocks are running */
+	dsi_write(msm_host, REG_DSI_RESET, 1);
+	wmb();	/* make sure reset happen */
+	dsi_write(msm_host, REG_DSI_RESET, 0);
+	wmb();	/* controller out of reset */
+	dsi_write(msm_host, REG_DSI_CTRL, data0);
+	wmb();	/* make sure dsi controller enabled again */
+}
+
+static void dsi_err_worker(struct work_struct *work)
+{
+	struct msm_dsi_host *msm_host =
+		container_of(work, struct msm_dsi_host, err_work);
+	u32 status = msm_host->err_work_state;
+
+	pr_err("%s: status=%x\n", __func__, status);
+	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
+		dsi_sw_reset_restore(msm_host);
+
+	/* It is safe to clear here because error irq is disabled. */
+	msm_host->err_work_state = 0;
+
+	/* enable dsi error interrupt */
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+}
+
+static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
+
+	if (status) {
+		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
+		/* Writing of an extra 0 needed to clear error bits */
+		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
+		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
+	}
+}
+
+static void dsi_timeout_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
+
+	if (status) {
+		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
+	}
+}
+
+static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
+
+	if (status) {
+		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
+	}
+}
+
+static void dsi_fifo_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
+
+	/* fifo underflow, overflow */
+	if (status) {
+		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
+		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
+			msm_host->err_work_state |=
+					DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
+	}
+}
+
+static void dsi_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_STATUS0);
+
+	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
+		dsi_write(msm_host, REG_DSI_STATUS0, status);
+		msm_host->err_work_state |=
+			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
+	}
+}
+
+static void dsi_clk_status(struct msm_dsi_host *msm_host)
+{
+	u32 status;
+
+	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
+
+	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
+		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
+		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
+	}
+}
+
+static void dsi_error(struct msm_dsi_host *msm_host)
+{
+	/* disable dsi error interrupt */
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
+
+	dsi_clk_status(msm_host);
+	dsi_fifo_status(msm_host);
+	dsi_ack_err_status(msm_host);
+	dsi_timeout_status(msm_host);
+	dsi_status(msm_host);
+	dsi_dln0_phy_err(msm_host);
+
+	queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+	struct msm_dsi_host *msm_host = ptr;
+	u32 isr;
+	unsigned long flags;
+
+	if (!msm_host->ctrl_base)
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&msm_host->intr_lock, flags);
+	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+	DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+	if (isr & DSI_IRQ_ERROR)
+		dsi_error(msm_host);
+
+	if (isr & DSI_IRQ_VIDEO_DONE)
+		complete(&msm_host->video_comp);
+
+	if (isr & DSI_IRQ_CMD_DMA_DONE)
+		complete(&msm_host->dma_comp);
+
+	return IRQ_HANDLED;
+}
+
+static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
+			struct device *panel_device)
+{
+	int ret;
+
+	msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
+						"disp-enable");
+	if (IS_ERR(msm_host->disp_en_gpio)) {
+		DBG("cannot get disp-enable-gpios %ld",
+				PTR_ERR(msm_host->disp_en_gpio));
+		msm_host->disp_en_gpio = NULL;
+	}
+	if (msm_host->disp_en_gpio) {
+		ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
+		if (ret) {
+			pr_err("cannot set dir to disp-en-gpios %d\n", ret);
+			return ret;
+		}
+	}
+
+	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
+	if (IS_ERR(msm_host->te_gpio)) {
+		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
+		msm_host->te_gpio = NULL;
+	}
+
+	if (msm_host->te_gpio) {
+		ret = gpiod_direction_input(msm_host->te_gpio);
+		if (ret) {
+			pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+					struct mipi_dsi_device *dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	msm_host->channel = dsi->channel;
+	msm_host->lanes = dsi->lanes;
+	msm_host->format = dsi->format;
+	msm_host->mode_flags = dsi->mode_flags;
+
+	msm_host->panel_node = dsi->dev.of_node;
+
+	/* Some gpios defined in panel DT need to be controlled by host */
+	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
+	if (ret)
+		return ret;
+
+	DBG("id=%d", msm_host->id);
+	if (msm_host->dev)
+		drm_helper_hpd_irq_event(msm_host->dev);
+
+	return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+					struct mipi_dsi_device *dsi)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	msm_host->panel_node = NULL;
+
+	DBG("id=%d", msm_host->id);
+	if (msm_host->dev)
+		drm_helper_hpd_irq_event(msm_host->dev);
+
+	return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+					const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	if (!msg || !msm_host->power_on)
+		return -EINVAL;
+
+	mutex_lock(&msm_host->cmd_mutex);
+	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+	mutex_unlock(&msm_host->cmd_mutex);
+
+	return ret;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+	.attach = dsi_host_attach,
+	.detach = dsi_host_detach,
+	.transfer = dsi_host_transfer,
+};
+
+int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_host *msm_host = NULL;
+	struct platform_device *pdev = msm_dsi->pdev;
+	int ret;
+
+	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+	if (!msm_host) {
+		pr_err("%s: FAILED: cannot alloc dsi host\n",
+		       __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,dsi-host-index", &msm_host->id);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s: host index not specified, ret=%d\n",
+			__func__, ret);
+		goto fail;
+	}
+	msm_host->pdev = pdev;
+
+	ret = dsi_clk_init(msm_host);
+	if (ret) {
+		pr_err("%s: unable to initialize dsi clks\n", __func__);
+		goto fail;
+	}
+
+	msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
+	if (IS_ERR(msm_host->ctrl_base)) {
+		pr_err("%s: unable to map Dsi ctrl base\n", __func__);
+		ret = PTR_ERR(msm_host->ctrl_base);
+		goto fail;
+	}
+
+	msm_host->cfg = dsi_get_config(msm_host);
+	if (!msm_host->cfg) {
+		ret = -EINVAL;
+		pr_err("%s: get config failed\n", __func__);
+		goto fail;
+	}
+
+	ret = dsi_regulator_init(msm_host);
+	if (ret) {
+		pr_err("%s: regulator init failed\n", __func__);
+		goto fail;
+	}
+
+	msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+	if (!msm_host->rx_buf) {
+		ret = -ENOMEM;
+		pr_err("%s: alloc rx temp buf failed\n", __func__);
+		goto fail;
+	}
+
+	init_completion(&msm_host->dma_comp);
+	init_completion(&msm_host->video_comp);
+	mutex_init(&msm_host->dev_mutex);
+	mutex_init(&msm_host->cmd_mutex);
+	mutex_init(&msm_host->clk_mutex);
+	spin_lock_init(&msm_host->intr_lock);
+
+	/* setup workqueue */
+	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+
+	msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
+					msm_host->id);
+	if (!msm_dsi->phy) {
+		ret = -EINVAL;
+		pr_err("%s: phy init failed\n", __func__);
+		goto fail;
+	}
+	msm_dsi->host = &msm_host->base;
+	msm_dsi->id = msm_host->id;
+
+	DBG("Dsi Host %d initialized", msm_host->id);
+	return 0;
+
+fail:
+	return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	DBG("");
+	dsi_tx_buf_free(msm_host);
+	if (msm_host->workqueue) {
+		flush_workqueue(msm_host->workqueue);
+		destroy_workqueue(msm_host->workqueue);
+		msm_host->workqueue = NULL;
+	}
+
+	mutex_destroy(&msm_host->clk_mutex);
+	mutex_destroy(&msm_host->cmd_mutex);
+	mutex_destroy(&msm_host->dev_mutex);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+					struct drm_device *dev)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct platform_device *pdev = msm_host->pdev;
+	int ret;
+
+	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!msm_host->irq) {
+		dev_err(dev->dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_irq(&pdev->dev, msm_host->irq,
+			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"dsi_isr", msm_host);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+				msm_host->irq, ret);
+		return ret;
+	}
+
+	msm_host->dev = dev;
+	ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+	if (ret) {
+		pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct device_node *node;
+	int ret;
+
+	/* Register mipi dsi host */
+	if (!msm_host->registered) {
+		host->dev = &msm_host->pdev->dev;
+		host->ops = &dsi_host_ops;
+		ret = mipi_dsi_host_register(host);
+		if (ret)
+			return ret;
+
+		msm_host->registered = true;
+
+		/* If the panel driver has not been probed after host register,
+		 * we should defer the host's probe.
+		 * This makes sure the panel is connected when fbcon detects
+		 * the connector status and gets the proper display mode to
+		 * create the framebuffer.
+		 */
+		if (check_defer) {
+			node = of_get_child_by_name(msm_host->pdev->dev.of_node,
+							"panel");
+			if (node) {
+				if (!of_drm_find_panel(node))
+					return -EPROBE_DEFER;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void msm_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (msm_host->registered) {
+		mipi_dsi_host_unregister(host);
+		host->dev = NULL;
+		host->ops = NULL;
+		msm_host->registered = false;
+	}
+}
+
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	/* TODO: make sure dsi_cmd_mdp is idle.
+	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
+	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
+	 * How to handle the old versions? Wait for mdp cmd done?
+	 */
+
+	/*
+	 * mdss interrupt is generated in mdp core clock domain
+	 * mdp clock need to be enabled to receive dsi interrupt
+	 */
+	dsi_clk_ctrl(msm_host, 1);
+
+	/* TODO: vote for bus bandwidth */
+
+	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+		dsi_set_tx_power_mode(0, msm_host);
+
+	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+	dsi_write(msm_host, REG_DSI_CTRL,
+		msm_host->dma_cmd_ctrl_restore |
+		DSI_CTRL_CMD_MODE_EN |
+		DSI_CTRL_ENABLE);
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+	return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+		dsi_set_tx_power_mode(1, msm_host);
+
+	/* TODO: unvote for bus bandwidth */
+
+	dsi_clk_ctrl(msm_host, 0);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+				const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int data_byte, rx_byte, dlen, end;
+	int short_response, diff, pkt_size, ret = 0;
+	char cmd;
+	int rlen = msg->rx_len;
+	u8 *buf;
+
+	if (rlen <= 2) {
+		short_response = 1;
+		pkt_size = rlen;
+		rx_byte = 4;
+	} else {
+		short_response = 0;
+		data_byte = 10;	/* first read */
+		if (rlen < data_byte)
+			pkt_size = rlen;
+		else
+			pkt_size = data_byte;
+		rx_byte = data_byte + 6; /* 4 header + 2 crc */
+	}
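+	/* The readback FIFO is 16 bytes: a 4-byte header plus 10 data bytes
+	 * on the first read (14 on later reads, once the header has been
+	 * shifted out) plus a 2-byte CRC.
+	 */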
+
+	buf = msm_host->rx_buf;
+	end = 0;
+	while (!end) {
+		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+		struct mipi_dsi_msg max_pkt_size_msg = {
+			.channel = msg->channel,
+			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+			.tx_len = 2,
+			.tx_buf = tx,
+		};
+
+		DBG("rlen=%d pkt_size=%d rx_byte=%d",
+			rlen, pkt_size, rx_byte);
+
+		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+		if (ret < 2) {
+			pr_err("%s: Set max pkt size failed, %d\n",
+				__func__, ret);
+			return -EINVAL;
+		}
+
+		if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
+			(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+			/* Clear the RDBK_DATA registers */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+					DSI_RDBK_DATA_CTRL_CLR);
+			wmb(); /* make sure the RDBK registers are cleared */
+			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+			wmb(); /* release cleared status before transfer */
+		}
+
+		ret = dsi_cmds2buf_tx(msm_host, msg);
+		if (ret < msg->tx_len) {
+			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+			return ret;
+		}
+
+		/*
+		 * Once the cmd_dma_done interrupt is received, the return
+		 * data from the client is already stored in the RDBK_DATA
+		 * registers. Since the rx fifo is 16 bytes, the dcs header
+		 * is kept on the first pass only; on later passes it is
+		 * shifted out of the registers and lost.
+		 */
+		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+		if (dlen <= 0)
+			return 0;
+
+		if (short_response)
+			break;
+
+		if (rlen <= data_byte) {
+			diff = data_byte - rlen;
+			end = 1;
+		} else {
+			diff = 0;
+			rlen -= data_byte;
+		}
+
+		if (!end) {
+			dlen -= 2; /* 2 crc */
+			dlen -= diff;
+			buf += dlen;	/* next start position */
+			data_byte = 14;	/* NOT first read */
+			if (rlen < data_byte)
+				pkt_size += rlen;
+			else
+				pkt_size += data_byte;
+			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+		}
+	}
+
+	/*
+	 * For single Long read, if the requested rlen < 10,
+	 * we need to shift the start position of rx
+	 * data buffer to skip the bytes which are not
+	 * updated.
+	 */
+	if (pkt_size < 10 && !short_response)
+		buf = msm_host->rx_buf + (10 - rlen);
+	else
+		buf = msm_host->rx_buf;
+
+	cmd = buf[0];
+	switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
+		ret = 0;
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		ret = dsi_short_read1_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		ret = dsi_short_read2_resp(buf, msg);
+		break;
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		ret = dsi_long_read_resp(buf, msg);
+		break;
+	default:
+		pr_warn("%s:Invalid response cmd\n", __func__);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+	/* Make sure trigger happens */
+	wmb();
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_op_mode_config(msm_host,
+		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+	/* TODO: clock should be turned off for command mode,
+	 * and only turned on before MDP START.
+	 * This part of code should be enabled once mdp driver support it.
+	 */
+	/* if (msm_panel->mode == MSM_DSI_CMD_MODE)
+		dsi_clk_ctrl(msm_host, 0); */
+
+	return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	dsi_op_mode_config(msm_host,
+		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+	/* Since we have disabled INTF, the video engine will not stop, so
+	 * the cmd engine would stay blocked.
+	 * Reset to disable the video engine so that we can send off cmds.
+	 */
+	dsi_sw_reset(msm_host);
+
+	return 0;
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	u32 clk_pre = 0, clk_post = 0;
+	int ret = 0;
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (msm_host->power_on) {
+		DBG("dsi host already on");
+		goto unlock_ret;
+	}
+
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		goto unlock_ret;
+	}
+
+	ret = dsi_host_regulator_enable(msm_host);
+	if (ret) {
+		pr_err("%s:Failed to enable vregs.ret=%d\n",
+			__func__, ret);
+		goto unlock_ret;
+	}
+
+	ret = dsi_bus_clk_enable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	dsi_phy_sw_reset(msm_host);
+	ret = msm_dsi_manager_phy_enable(msm_host->id,
+					msm_host->byte_clk_rate * 8,
+					clk_get_rate(msm_host->esc_clk),
+					&clk_pre, &clk_post);
+	dsi_bus_clk_disable(msm_host);
+	if (ret) {
+		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	ret = dsi_clk_ctrl(msm_host, 1);
+	if (ret) {
+		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+		goto fail_disable_reg;
+	}
+
+	dsi_timing_setup(msm_host);
+	dsi_sw_reset(msm_host);
+	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
+
+	if (msm_host->disp_en_gpio)
+		gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+	msm_host->power_on = true;
+	mutex_unlock(&msm_host->dev_mutex);
+
+	return 0;
+
+fail_disable_reg:
+	dsi_host_regulator_disable(msm_host);
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	mutex_lock(&msm_host->dev_mutex);
+	if (!msm_host->power_on) {
+		DBG("dsi host already off");
+		goto unlock_ret;
+	}
+
+	dsi_ctrl_config(msm_host, false, 0, 0);
+
+	if (msm_host->disp_en_gpio)
+		gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+	msm_dsi_manager_phy_disable(msm_host->id);
+
+	dsi_clk_ctrl(msm_host, 0);
+
+	dsi_host_regulator_disable(msm_host);
+
+	DBG("-");
+
+	msm_host->power_on = false;
+
+unlock_ret:
+	mutex_unlock(&msm_host->dev_mutex);
+	return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+					struct drm_display_mode *mode)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	if (msm_host->mode) {
+		drm_mode_destroy(msm_host->dev, msm_host->mode);
+		msm_host->mode = NULL;
+	}
+
+	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+	if (!msm_host->mode) {
+		pr_err("%s: cannot duplicate mode\n", __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
+				unsigned long *panel_flags)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct drm_panel *panel;
+
+	panel = of_drm_find_panel(msm_host->panel_node);
+	if (panel_flags)
+		*panel_flags = msm_host->mode_flags;
+
+	return panel;
+}
+

+ 705 - 0
drivers/gpu/drm/msm/dsi/dsi_manager.c

@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_kms.h"
+#include "dsi.h"
+
+struct msm_dsi_manager {
+	struct msm_dsi *dsi[DSI_MAX];
+
+	bool is_dual_panel;
+	bool is_sync_needed;
+	int master_panel_id;
+};
+
+static struct msm_dsi_manager msm_dsim_glb;
+
+#define IS_DUAL_PANEL()		(msm_dsim_glb.is_dual_panel)
+#define IS_SYNC_NEEDED()	(msm_dsim_glb.is_sync_needed)
+#define IS_MASTER_PANEL(id)	(msm_dsim_glb.master_panel_id == id)
+
+static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
+{
+	return msm_dsim_glb.dsi[id];
+}
+
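+/* Assuming two controllers (DSI_MAX == 2), (id + 1) % DSI_MAX picks the
+ * peer of id.
+ */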
+static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
+{
+	return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
+}
+
+static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+	/* We assume that both DSI nodes carry the same dual-panel and
+	 * sync-mode information, and that only one node specifies the
+	 * master in dual-panel mode.
+	 */
+	if (!msm_dsim->is_dual_panel)
+		msm_dsim->is_dual_panel = of_property_read_bool(
+						np, "qcom,dual-panel-mode");
+
+	if (msm_dsim->is_dual_panel) {
+		if (of_property_read_bool(np, "qcom,master-panel"))
+			msm_dsim->master_panel_id = id;
+		if (!msm_dsim->is_sync_needed)
+			msm_dsim->is_sync_needed = of_property_read_bool(
+					np, "qcom,sync-dual-panel");
+	}
+
+	return 0;
+}
+
+struct dsi_connector {
+	struct drm_connector base;
+	int id;
+};
+
+struct dsi_bridge {
+	struct drm_bridge base;
+	int id;
+};
+
+#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
+#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
+
+static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
+{
+	struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+	return dsi_connector->id;
+}
+
+static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
+{
+	struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
+	return dsi_bridge->id;
+}
+
+static enum drm_connector_status dsi_mgr_connector_detect(
+		struct drm_connector *connector, bool force)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct msm_drm_private *priv = connector->dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	DBG("id=%d", id);
+	if (!msm_dsi->panel) {
+		msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
+						&msm_dsi->panel_flags);
+
+		/* There is only one panel in the global panel list in
+		 * dual panel mode. The slave DSI should therefore get
+		 * the drm_panel instance from the master DSI, while
+		 * keeping the panel flags obtained from its own DSI link.
+		 */
+		if (!msm_dsi->panel && IS_DUAL_PANEL() &&
+			!IS_MASTER_PANEL(id) && other_dsi)
+			msm_dsi->panel = msm_dsi_host_get_panel(
+					other_dsi->host, NULL);
+
+		if (msm_dsi->panel && IS_DUAL_PANEL())
+			drm_object_attach_property(&connector->base,
+				connector->dev->mode_config.tile_property, 0);
+
+		/* Pass the split-display info to kms once the dual panel
+		 * is connected to both hosts.
+		 */
+		if (msm_dsi->panel && IS_DUAL_PANEL() &&
+			other_dsi && other_dsi->panel) {
+			bool cmd_mode = !(msm_dsi->panel_flags &
+						MIPI_DSI_MODE_VIDEO);
+			struct drm_encoder *encoder = msm_dsi_get_encoder(
+					dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
+			struct drm_encoder *slave_enc = msm_dsi_get_encoder(
+					dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
+
+			if (kms->funcs->set_split_display)
+				kms->funcs->set_split_display(kms, encoder,
+							slave_enc, cmd_mode);
+			else
+				pr_err("mdp does not support dual panel\n");
+		}
+	}
+
+	return msm_dsi->panel ? connector_status_connected :
+		connector_status_disconnected;
+}
+
+static void dsi_mgr_connector_destroy(struct drm_connector *connector)
+{
+	DBG("");
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode, *m;
+
+	/* Only support left-right mode */
+	list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
+		mode->clock >>= 1;
+		mode->hdisplay >>= 1;
+		mode->hsync_start >>= 1;
+		mode->hsync_end >>= 1;
+		mode->htotal >>= 1;
+		drm_mode_set_name(mode);
+	}
+}
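
A quick worked example of the halving above: for a left-right tiled panel, each DSI link transmits only half of every scanline, so the pixel clock and all horizontal timings are divided by two while the vertical timings stay untouched. A minimal standalone sketch (the 1080p numbers are illustrative, not taken from the driver):

#include <stdio.h>

struct fake_mode {
	int clock, hdisplay, hsync_start, hsync_end, htotal;
};

int main(void)
{
	/* Illustrative full-panel timings (1080p60). */
	struct fake_mode m = { 148500, 1920, 2008, 2052, 2200 };

	/* Same halving as dsi_dual_connector_fix_modes(): each link
	 * drives only the left or right half of the panel. */
	m.clock >>= 1;
	m.hdisplay >>= 1;
	m.hsync_start >>= 1;
	m.hsync_end >>= 1;
	m.htotal >>= 1;

	printf("per-link: clock=%d kHz, h: %d %d %d %d\n",
	       m.clock, m.hdisplay, m.hsync_start, m.hsync_end, m.htotal);
	return 0;
}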
+
+static int dsi_dual_connector_tile_init(
+			struct drm_connector *connector, int id)
+{
+	struct drm_display_mode *mode;
+	/* Fake topology id */
+	char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
+
+	if (connector->tile_group) {
+		DBG("Tile property has been initialized");
+		return 0;
+	}
+
+	/* Use the first mode only for now */
+	mode = list_first_entry(&connector->probed_modes,
+				struct drm_display_mode,
+				head);
+	if (!mode)
+		return -EINVAL;
+
+	connector->tile_group = drm_mode_get_tile_group(
+					connector->dev, topo_id);
+	if (!connector->tile_group)
+		connector->tile_group = drm_mode_create_tile_group(
+					connector->dev, topo_id);
+	if (!connector->tile_group) {
+		pr_err("%s: failed to create tile group\n", __func__);
+		return -ENOMEM;
+	}
+
+	connector->has_tile = true;
+	connector->tile_is_single_monitor = true;
+
+	/* mode has already been halved by dsi_dual_connector_fix_modes() */
+	connector->tile_h_size = mode->hdisplay;
+	connector->tile_v_size = mode->vdisplay;
+
+	/* Only support left-right mode */
+	connector->num_h_tile = 2;
+	connector->num_v_tile = 1;
+
+	connector->tile_v_loc = 0;
+	connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
+
+	return 0;
+}
+
+static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_panel *panel = msm_dsi->panel;
+	int ret, num;
+
+	if (!panel)
+		return 0;
+
+	/* Since we have 2 connectors but only 1 drm_panel in dual DSI mode,
+	 * the panel must not stay attached to any single connector.
+	 * Attach the panel to the current connector only temporarily here,
+	 * so that it can set modes on this connector.
+	 */
+	drm_panel_attach(panel, connector);
+	num = drm_panel_get_modes(panel);
+	drm_panel_detach(panel);
+	if (!num)
+		return 0;
+
+	if (IS_DUAL_PANEL()) {
+		/* report half resolution to user */
+		dsi_dual_connector_fix_modes(connector);
+		ret = dsi_dual_connector_tile_init(connector, id);
+		if (ret)
+			return ret;
+		ret = drm_mode_connector_set_tile_property(connector);
+		if (ret) {
+			pr_err("%s: set tile property failed, %d\n",
+					__func__, ret);
+			return ret;
+		}
+	}
+
+	return num;
+}
+
+static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+	struct msm_drm_private *priv = connector->dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	long actual, requested;
+
+	DBG("");
+	requested = 1000 * mode->clock;
+	actual = kms->funcs->round_pixclk(kms, requested, encoder);
+
+	DBG("requested=%ld, actual=%ld", requested, actual);
+	if (actual != requested)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+dsi_mgr_connector_best_encoder(struct drm_connector *connector)
+{
+	int id = dsi_mgr_connector_get_id(connector);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+
+	DBG("");
+	return msm_dsi_get_encoder(msm_dsi);
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct drm_panel *panel = msm_dsi->panel;
+	bool is_dual_panel = IS_DUAL_PANEL();
+	int ret;
+
+	DBG("id=%d", id);
+	if (!panel || (is_dual_panel && (DSI_1 == id)))
+		return;
+
+	ret = msm_dsi_host_power_on(host);
+	if (ret) {
+		pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
+		goto host_on_fail;
+	}
+
+	if (is_dual_panel && msm_dsi1) {
+		ret = msm_dsi_host_power_on(msm_dsi1->host);
+		if (ret) {
+			pr_err("%s: power on host1 failed, %d\n",
+							__func__, ret);
+			goto host1_on_fail;
+		}
+	}
+
+	/* Always call the panel functions only once, because even for
+	 * dual panels there is only one drm_panel instance.
+	 */
+	ret = drm_panel_prepare(panel);
+	if (ret) {
+		pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
+		goto panel_prep_fail;
+	}
+
+	ret = msm_dsi_host_enable(host);
+	if (ret) {
+		pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+		goto host_en_fail;
+	}
+
+	if (is_dual_panel && msm_dsi1) {
+		ret = msm_dsi_host_enable(msm_dsi1->host);
+		if (ret) {
+			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+			goto host1_en_fail;
+		}
+	}
+
+	ret = drm_panel_enable(panel);
+	if (ret) {
+		pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
+		goto panel_en_fail;
+	}
+
+	return;
+
+panel_en_fail:
+	if (is_dual_panel && msm_dsi1)
+		msm_dsi_host_disable(msm_dsi1->host);
+host1_en_fail:
+	msm_dsi_host_disable(host);
+host_en_fail:
+	drm_panel_unprepare(panel);
+panel_prep_fail:
+	if (is_dual_panel && msm_dsi1)
+		msm_dsi_host_power_off(msm_dsi1->host);
+host1_on_fail:
+	msm_dsi_host_power_off(host);
+host_on_fail:
+	return;
+}
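
The unwind labels above follow the usual kernel pattern: each failure jumps to the label that undoes exactly the steps completed so far, in reverse order. A minimal standalone sketch of the pattern, with print stubs in place of the real host/panel calls and the dual-panel branches dropped for brevity:

#include <stdio.h>

/* Stubs standing in for the real calls; each step either succeeds
 * (returns 0) or fails, and has a matching undo step. */
static int step(const char *name, int fail)
{
	printf("%s\n", name);
	return fail ? -1 : 0;
}
static void undo(const char *name) { printf("undo %s\n", name); }

static void pre_enable(int fail_at)
{
	if (step("power on host", fail_at == 1)) goto host_on_fail;
	if (step("prepare panel", fail_at == 2)) goto panel_prep_fail;
	if (step("enable host", fail_at == 3)) goto host_en_fail;
	if (step("enable panel", fail_at == 4)) goto panel_en_fail;
	return;

panel_en_fail:
	undo("enable host");
host_en_fail:
	undo("prepare panel");
panel_prep_fail:
	undo("power on host");
host_on_fail:
	return;
}

int main(void)
{
	pre_enable(3);	/* unwinds "prepare panel" and "power on host" */
	return 0;
}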
+
+static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
+{
+	DBG("");
+}
+
+static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
+{
+	DBG("");
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	struct drm_panel *panel = msm_dsi->panel;
+	bool is_dual_panel = IS_DUAL_PANEL();
+	int ret;
+
+	DBG("id=%d", id);
+
+	if (!panel || (is_dual_panel && (DSI_1 == id)))
+		return;
+
+	ret = drm_panel_disable(panel);
+	if (ret)
+		pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+
+	ret = msm_dsi_host_disable(host);
+	if (ret)
+		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
+
+	if (is_dual_panel && msm_dsi1) {
+		ret = msm_dsi_host_disable(msm_dsi1->host);
+		if (ret)
+			pr_err("%s: host1 disable failed, %d\n", __func__, ret);
+	}
+
+	ret = drm_panel_unprepare(panel);
+	if (ret)
+		pr_err("%s: Panel %d unprepare failed, %d\n", __func__, id, ret);
+
+	ret = msm_dsi_host_power_off(host);
+	if (ret)
+		pr_err("%s: host %d power off failed, %d\n", __func__, id, ret);
+
+	if (is_dual_panel && msm_dsi1) {
+		ret = msm_dsi_host_power_off(msm_dsi1->host);
+		if (ret)
+			pr_err("%s: host1 power off failed, %d\n",
+								__func__, ret);
+	}
+}
+
+static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	int id = dsi_mgr_bridge_get_id(bridge);
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_dual_panel = IS_DUAL_PANEL();
+
+	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	if (is_dual_panel && (DSI_1 == id))
+		return;
+
+	msm_dsi_host_set_display_mode(host, adjusted_mode);
+	if (is_dual_panel && other_dsi)
+		msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
+}
+
+static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.detect = dsi_mgr_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = dsi_mgr_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
+	.get_modes = dsi_mgr_connector_get_modes,
+	.mode_valid = dsi_mgr_connector_mode_valid,
+	.best_encoder = dsi_mgr_connector_best_encoder,
+};
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+	.pre_enable = dsi_mgr_bridge_pre_enable,
+	.enable = dsi_mgr_bridge_enable,
+	.disable = dsi_mgr_bridge_disable,
+	.post_disable = dsi_mgr_bridge_post_disable,
+	.mode_set = dsi_mgr_bridge_mode_set,
+};
+
+/* initialize connector */
+struct drm_connector *msm_dsi_manager_connector_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_connector *connector = NULL;
+	struct dsi_connector *dsi_connector;
+	int ret;
+
+	dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
+				sizeof(*dsi_connector), GFP_KERNEL);
+	if (!dsi_connector) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	dsi_connector->id = id;
+
+	connector = &dsi_connector->base;
+
+	ret = drm_connector_init(msm_dsi->dev, connector,
+			&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
+	if (ret)
+		goto fail;
+
+	drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
+
+	/* Enable HPD so that the hpd event is handled
+	 * when the panel is attached to the host.
+	 */
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	/* The display driver does not support interlaced modes yet. */
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	ret = drm_connector_register(connector);
+	if (ret)
+		goto fail;
+
+	return connector;
+
+fail:
+	if (connector)
+		dsi_mgr_connector_destroy(connector);
+
+	return ERR_PTR(ret);
+}
+
+/* initialize bridge */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_bridge *bridge = NULL;
+	struct dsi_bridge *dsi_bridge;
+	int ret;
+
+	dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
+				sizeof(*dsi_bridge), GFP_KERNEL);
+	if (!dsi_bridge) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	dsi_bridge->id = id;
+
+	bridge = &dsi_bridge->base;
+	bridge->funcs = &dsi_mgr_bridge_funcs;
+
+	ret = drm_bridge_attach(msm_dsi->dev, bridge);
+	if (ret)
+		goto fail;
+
+	return bridge;
+
+fail:
+	if (bridge)
+		msm_dsi_manager_bridge_destroy(bridge);
+
+	return ERR_PTR(ret);
+}
+
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+int msm_dsi_manager_phy_enable(int id,
+		const unsigned long bit_rate, const unsigned long esc_rate,
+		u32 *clk_pre, u32 *clk_post)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi_phy *phy = msm_dsi->phy;
+	int ret;
+
+	ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+	if (ret)
+		return ret;
+
+	msm_dsi->phy_enabled = true;
+	msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
+
+	return 0;
+}
+
+void msm_dsi_manager_phy_disable(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+	struct msm_dsi_phy *phy = msm_dsi->phy;
+
+	/* Disable the DSI PHY.
+	 * In a dual-DSI configuration, the PHY of the first controller
+	 * may only be disabled once the second controller is disabled too.
+	 */
+	msm_dsi->phy_enabled = false;
+	if (IS_DUAL_PANEL() && mdsi && sdsi) {
+		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+			msm_dsi_phy_disable(sdsi->phy);
+			msm_dsi_phy_disable(mdsi->phy);
+		}
+	} else {
+		msm_dsi_phy_disable(phy);
+	}
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+	bool is_read = (msg->rx_buf && msg->rx_len);
+	bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+	int ret;
+
+	if (!msg->tx_buf || !msg->tx_len)
+		return 0;
+
+	/* In the dual-master case, the panel requires the same commands
+	 * to be sent on both DSI links. The host issues the command
+	 * trigger to both links when DSI_1 calls the cmd transfer
+	 * function, regardless of whether that happens before or after
+	 * the DSI_0 cmd transfer.
+	 */
+	if (need_sync && (id == DSI_0))
+		return is_read ? msg->rx_len : msg->tx_len;
+
+	if (need_sync && msm_dsi0) {
+		ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+		if (ret) {
+			pr_err("%s: failed to prepare non-trigger host, %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+	ret = msm_dsi_host_xfer_prepare(host, msg);
+	if (ret) {
+		pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+		goto restore_host0;
+	}
+
+	ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+			msm_dsi_host_cmd_tx(host, msg);
+
+	msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+	if (need_sync && msm_dsi0)
+		msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+	return ret;
+}
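
The sync protocol above can be summarized as: when both links must carry identical commands, the DSI_0 transfer is a no-op that just reports success, and the DSI_1 transfer prepares both hosts and triggers both links before restoring them in reverse order. A standalone sketch of that control flow, with hypothetical print stubs standing in for the host prepare/commit/restore calls and the trigger step folded in:

#include <stdio.h>
#include <stdbool.h>

enum { DSI_0, DSI_1 };
static const bool sync_needed = true;	/* qcom,sync-dual-panel set */

/* Hypothetical stand-ins for the real host operations. */
static void prepare(int host) { printf("prepare host %d\n", host); }
static void commit(int host)  { printf("commit host %d\n", host); }
static void restore(int host) { printf("restore host %d\n", host); }

static int cmd_xfer(int id, int tx_len)
{
	/* DSI_0 defers to DSI_1, which then drives both links. */
	if (sync_needed && id == DSI_0)
		return tx_len;

	if (sync_needed)
		prepare(DSI_0);
	prepare(id);
	if (sync_needed)
		commit(DSI_0);	/* commit the non-trigger link first */
	commit(id);
	restore(id);
	if (sync_needed)
		restore(DSI_0);
	return tx_len;
}

int main(void)
{
	cmd_xfer(DSI_0, 4);	/* returns immediately */
	cmd_xfer(DSI_1, 4);	/* drives both links */
	return 0;
}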
+
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+	struct mipi_dsi_host *host = msm_dsi->host;
+
+	if (IS_SYNC_NEEDED() && (id == DSI_0))
+		return false;
+
+	if (IS_SYNC_NEEDED() && msm_dsi0)
+		msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len);
+
+	msm_dsi_host_cmd_xfer_commit(host, iova, len);
+
+	return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+	int id = msm_dsi->id;
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	int ret;
+
+	if (id >= DSI_MAX) {
+		pr_err("%s: invalid id %d\n", __func__, id);
+		return -EINVAL;
+	}
+
+	if (msm_dsim->dsi[id]) {
+		pr_err("%s: dsi%d already registered\n", __func__, id);
+		return -EBUSY;
+	}
+
+	msm_dsim->dsi[id] = msm_dsi;
+
+	ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+	if (ret) {
+		pr_err("%s: failed to parse dual panel info\n", __func__);
+		return ret;
+	}
+
+	if (!IS_DUAL_PANEL()) {
+		ret = msm_dsi_host_register(msm_dsi->host, true);
+	} else if (!other_dsi) {
+		return 0;
+	} else {
+		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+					msm_dsi : other_dsi;
+		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+					other_dsi : msm_dsi;
+		/* Register the slave host first, so that the slave DSI
+		 * device gets a chance to probe without blocking the
+		 * master DSI device's probe.
+		 * Also, do not check probe deferral for the slave host,
+		 * because only the master DSI device adds the panel to the
+		 * global panel list; the panel's device is the master DSI
+		 * device.
+		 */
+		ret = msm_dsi_host_register(sdsi->host, false);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_register(mdsi->host, true);
+	}
+
+	return ret;
+}
+
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
+{
+	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+	if (msm_dsi->host)
+		msm_dsi_host_unregister(msm_dsi->host);
+	msm_dsim->dsi[msm_dsi->id] = NULL;
+}
+

+ 352 - 0
drivers/gpu/drm/msm/dsi/dsi_phy.c

@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi.h"
+#include "dsi.xml.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct dsi_dphy_timing {
+	u32 clk_pre;
+	u32 clk_post;
+	u32 clk_zero;
+	u32 clk_trail;
+	u32 clk_prepare;
+	u32 hs_exit;
+	u32 hs_zero;
+	u32 hs_prepare;
+	u32 hs_trail;
+	u32 hs_rqst;
+	u32 ta_go;
+	u32 ta_sure;
+	u32 ta_get;
+};
+
+struct msm_dsi_phy {
+	void __iomem *base;
+	void __iomem *reg_base;
+	int id;
+	struct dsi_dphy_timing timing;
+	int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
+		const unsigned long bit_rate, const unsigned long esc_rate);
+	int (*disable)(struct msm_dsi_phy *phy);
+};
+
+#define S_DIV_ROUND_UP(n, d)	\
+	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+				s32 min_result, bool even)
+{
+	s32 v;
+	v = (tmax - tmin) * percent;
+	v = S_DIV_ROUND_UP(v, 100) + tmin;
+	if (even && (v & 0x1))
+		return max_t(s32, min_result, v - 1);
+	else
+		return max_t(s32, min_result, v);
+}
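
S_DIV_ROUND_UP() rounds a signed division away from zero, and linear_inter() walks percent of the way from tmin toward tmax, rounding the scaled delta up, optionally snapping the result down to an even value, and clamping to min_result. A standalone demonstration duplicating both helpers with illustrative inputs:

#include <stdio.h>

#define S_DIV_ROUND_UP(n, d) \
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

static int linear_inter(int tmax, int tmin, int percent, int min_result,
			int even)
{
	int v = (tmax - tmin) * percent;

	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 1))
		v--;
	return v > min_result ? v : min_result;
}

int main(void)
{
	/* 10% of the way from 50 to 101: ceil(510/100) + 50 = 56 */
	printf("%d\n", linear_inter(101, 50, 10, 0, 0));
	/* Same, snapped to an even value: 56 is already even */
	printf("%d\n", linear_inter(101, 50, 10, 0, 1));
	/* Negative numerators round away from zero: -7/2 -> -4 */
	printf("%d\n", S_DIV_ROUND_UP(-7, 2));
	return 0;
}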
+
+static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+					s32 ui, s32 coeff, s32 pcnt)
+{
+	s32 tmax, tmin, clk_z;
+	s32 temp;
+
+	/* reset */
+	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	if (tmin > 255) {
+		tmax = 511;
+		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+	} else {
+		tmax = 255;
+		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+	}
+
+	/* adjust */
+	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+	timing->clk_zero = clk_z + 8 - temp;
+}
+
+static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+	const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	s32 ui, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 10;
+	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+	temp = lpx / ui;
+	if (temp & 0x1)
+		timing->hs_rqst = temp;
+	else
+		timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+	/* Calculate clk_zero after clk_prepare and hs_rqst */
+	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	temp = 85 * coeff + 6 * ui;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 40 * coeff + 4 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+	tmax = 255;
+	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+	temp = 145 * coeff + 10 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 60 * coeff + 4 * ui;
+	tmin = DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	tmax = 255;
+	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+	tmax = 63;
+	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	tmax = 63;
+	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+	temp += 8 * ui + lpx;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	if (tmin > tmax) {
+		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
+		timing->clk_pre = temp >> 1;
+	} else {
+		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+		timing->clk_pre, timing->clk_post, timing->clk_zero,
+		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+		timing->hs_rqst);
+
+	return 0;
+}
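
The coeff = 1000 factor is a fixed-point scale: ui and lpx are the bit-clock and escape-clock periods in nanoseconds, multiplied by 1000, so all the D-PHY spec constants (95 ns, 38 ns, ...) can be handled in integer math without losing precision. A standalone sketch of the arithmetic, with an overflow-safe mult_frac() of the same shape as the kernel's and illustrative clock rates:

#include <stdio.h>

#define NSEC_PER_MSEC	1000000L

/* Overflow-safe n * m / d, same shape as the kernel's mult_frac(). */
#define mult_frac(n, m, d) (((n) / (d)) * (m) + (((n) % (d)) * (m)) / (d))

int main(void)
{
	long bit_rate = 800000000;	/* 800 Mbps per lane (illustrative) */
	long esc_rate = 19200000;	/* 19.2 MHz escape clock */
	long coeff = 1000;		/* fixed-point scale */

	/* ui = bit period in ns, scaled by 1000: 1.25 ns -> 1250 */
	long ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	/* lpx = escape period in ns, scaled by 1000: ~52.083 ns -> 52083 */
	long lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	printf("ui=%ld lpx=%ld\n", ui, lpx);
	return 0;
}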
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+
+	DBG("");
+
+	if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		pr_err("%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+	dsi_28nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+	}
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
+		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
+	else
+		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+
+	return 0;
+}
+
+static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+	dsi_28nm_phy_regulator_ctrl(phy, false);
+
+	/*
+	 * Wait for the register writes to complete in order to
+	 * ensure that the phy is completely disabled.
+	 */
+	wmb();
+
+	return 0;
+}
+
+#define dsi_phy_func_init(name)	\
+	do {	\
+		phy->enable = dsi_##name##_phy_enable;	\
+		phy->disable = dsi_##name##_phy_disable;	\
+	} while (0)
+
+struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id)
+{
+	struct msm_dsi_phy *phy;
+
+	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return NULL;
+
+	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR_OR_NULL(phy->base)) {
+		pr_err("%s: failed to map phy base\n", __func__);
+		return NULL;
+	}
+	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+	if (IS_ERR_OR_NULL(phy->reg_base)) {
+		pr_err("%s: failed to map phy regulator base\n", __func__);
+		return NULL;
+	}
+
+	switch (type) {
+	case MSM_DSI_PHY_28NM:
+		dsi_phy_func_init(28nm);
+		break;
+	default:
+		pr_err("%s: unsupported type, %d\n", __func__, type);
+		return NULL;
+	}
+
+	phy->id = id;
+
+	return phy;
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+	const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	if (!phy || !phy->enable)
+		return -EINVAL;
+	return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
+}
+
+int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+	if (!phy || !phy->disable)
+		return -EINVAL;
+	return phy->disable(phy);
+}
+
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+	u32 *clk_pre, u32 *clk_post)
+{
+	if (!phy)
+		return;
+	if (clk_pre)
+		*clk_pre = phy->timing.clk_pre;
+	if (clk_post)
+		*clk_post = phy->timing.clk_post;
+}
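
Callers are expected to pair these as enable -> msm_dsi_phy_get_clk_pre_post() -> disable, with the per-generation implementation selected through the function pointers that dsi_phy_func_init() pastes together (the dsi_ prefix exists partly because a C identifier cannot start with 28nm). A standalone sketch of the dispatch pattern, with hypothetical my28nm_* stubs:

#include <stdio.h>

struct phy {
	int (*enable)(struct phy *p);
	int (*disable)(struct phy *p);
};

static int my28nm_phy_enable(struct phy *p)  { puts("28nm enable");  return 0; }
static int my28nm_phy_disable(struct phy *p) { puts("28nm disable"); return 0; }

/* Token-pasting init, same shape as dsi_phy_func_init(name). */
#define phy_func_init(p, name)				\
	do {						\
		(p)->enable = name##_phy_enable;	\
		(p)->disable = name##_phy_disable;	\
	} while (0)

int main(void)
{
	struct phy p;

	phy_func_init(&p, my28nm);
	if (!p.enable(&p)) {
		/* ... run the link, read back clk_pre/clk_post ... */
		p.disable(&p);
	}
	return 0;
}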
+

+ 34 - 0
drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c

@@ -53,6 +53,23 @@ struct pll_rate {
 
 /* NOTE: keep sorted highest freq to lowest: */
 static const struct pll_rate freqtbl[] = {
+	{ 154000000, {
+		{ 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+		{ 0, 0 } }
+	},
 	/* 1080p60/1080p50 case */
 	{ 148500000, {
 		{ 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
@@ -112,6 +129,23 @@ static const struct pll_rate freqtbl[] = {
 		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
 		{ 0, 0 } }
 	},
+	{ 74176000, {
+		{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
+		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+		{ 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+		{ 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0   },
+		{ 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1   },
+		{ 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3   },
+		{ 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4   },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5   },
+		{ 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0      },
+		{ 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1      },
+		{ 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2      },
+		{ 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3      },
+		{ 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4      },
+		{ 0, 0 } }
+	},
 	{ 65000000, {
 		{ 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG    },
 		{ 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },

+ 258 - 141
drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h

@@ -8,9 +8,9 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml            (  27229 bytes, from 2015-02-10 17:00:41)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml            (  29312 bytes, from 2015-03-23 21:18:48)
 - /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2014-06-02 18:31:15)
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2015-01-23 16:20:19)
+- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml      (   2357 bytes, from 2015-03-23 20:38:49)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -37,11 +37,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
 
 
-enum mdp5_intf {
+enum mdp5_intf_type {
+	INTF_DISABLED = 0,
 	INTF_DSI = 1,
 	INTF_HDMI = 3,
 	INTF_LCDC = 5,
 	INTF_eDP = 9,
+	INTF_VIRTUAL = 100,
+	INTF_WB = 101,
 };
 
 enum mdp5_intfnum {
@@ -67,11 +70,11 @@ enum mdp5_pipe {
 
 enum mdp5_ctl_mode {
 	MODE_NONE = 0,
-	MODE_ROT0 = 1,
-	MODE_ROT1 = 2,
-	MODE_WB0 = 3,
-	MODE_WB1 = 4,
-	MODE_WFD = 5,
+	MODE_WB_0_BLOCK = 1,
+	MODE_WB_1_BLOCK = 2,
+	MODE_WB_0_LINE = 3,
+	MODE_WB_1_LINE = 4,
+	MODE_WB_2_LINE = 5,
 };
 
 enum mdp5_pack_3d {
@@ -94,33 +97,6 @@ enum mdp5_pipe_bwc {
 	BWC_Q_MED = 2,
 };
 
-enum mdp5_client_id {
-	CID_UNUSED = 0,
-	CID_VIG0_Y = 1,
-	CID_VIG0_CR = 2,
-	CID_VIG0_CB = 3,
-	CID_VIG1_Y = 4,
-	CID_VIG1_CR = 5,
-	CID_VIG1_CB = 6,
-	CID_VIG2_Y = 7,
-	CID_VIG2_CR = 8,
-	CID_VIG2_CB = 9,
-	CID_DMA0_Y = 10,
-	CID_DMA0_CR = 11,
-	CID_DMA0_CB = 12,
-	CID_DMA1_Y = 13,
-	CID_DMA1_CR = 14,
-	CID_DMA1_CB = 15,
-	CID_RGB0 = 16,
-	CID_RGB1 = 17,
-	CID_RGB2 = 18,
-	CID_VIG3_Y = 19,
-	CID_VIG3_CR = 20,
-	CID_VIG3_CB = 21,
-	CID_RGB3 = 22,
-	CID_MAX = 23,
-};
-
 enum mdp5_cursor_format {
 	CURSOR_FMT_ARGB8888 = 0,
 	CURSOR_FMT_ARGB1555 = 2,
@@ -144,30 +120,25 @@ enum mdp5_data_format {
 	DATA_FORMAT_YUV = 1,
 };
 
-#define MDP5_IRQ_INTF0_WB_ROT_COMP				0x00000001
-#define MDP5_IRQ_INTF1_WB_ROT_COMP				0x00000002
-#define MDP5_IRQ_INTF2_WB_ROT_COMP				0x00000004
-#define MDP5_IRQ_INTF3_WB_ROT_COMP				0x00000008
-#define MDP5_IRQ_INTF0_WB_WFD					0x00000010
-#define MDP5_IRQ_INTF1_WB_WFD					0x00000020
-#define MDP5_IRQ_INTF2_WB_WFD					0x00000040
-#define MDP5_IRQ_INTF3_WB_WFD					0x00000080
-#define MDP5_IRQ_INTF0_PING_PONG_COMP				0x00000100
-#define MDP5_IRQ_INTF1_PING_PONG_COMP				0x00000200
-#define MDP5_IRQ_INTF2_PING_PONG_COMP				0x00000400
-#define MDP5_IRQ_INTF3_PING_PONG_COMP				0x00000800
-#define MDP5_IRQ_INTF0_PING_PONG_RD_PTR				0x00001000
-#define MDP5_IRQ_INTF1_PING_PONG_RD_PTR				0x00002000
-#define MDP5_IRQ_INTF2_PING_PONG_RD_PTR				0x00004000
-#define MDP5_IRQ_INTF3_PING_PONG_RD_PTR				0x00008000
-#define MDP5_IRQ_INTF0_PING_PONG_WR_PTR				0x00010000
-#define MDP5_IRQ_INTF1_PING_PONG_WR_PTR				0x00020000
-#define MDP5_IRQ_INTF2_PING_PONG_WR_PTR				0x00040000
-#define MDP5_IRQ_INTF3_PING_PONG_WR_PTR				0x00080000
-#define MDP5_IRQ_INTF0_PING_PONG_AUTO_REF			0x00100000
-#define MDP5_IRQ_INTF1_PING_PONG_AUTO_REF			0x00200000
-#define MDP5_IRQ_INTF2_PING_PONG_AUTO_REF			0x00400000
-#define MDP5_IRQ_INTF3_PING_PONG_AUTO_REF			0x00800000
+#define MDP5_IRQ_WB_0_DONE					0x00000001
+#define MDP5_IRQ_WB_1_DONE					0x00000002
+#define MDP5_IRQ_WB_2_DONE					0x00000010
+#define MDP5_IRQ_PING_PONG_0_DONE				0x00000100
+#define MDP5_IRQ_PING_PONG_1_DONE				0x00000200
+#define MDP5_IRQ_PING_PONG_2_DONE				0x00000400
+#define MDP5_IRQ_PING_PONG_3_DONE				0x00000800
+#define MDP5_IRQ_PING_PONG_0_RD_PTR				0x00001000
+#define MDP5_IRQ_PING_PONG_1_RD_PTR				0x00002000
+#define MDP5_IRQ_PING_PONG_2_RD_PTR				0x00004000
+#define MDP5_IRQ_PING_PONG_3_RD_PTR				0x00008000
+#define MDP5_IRQ_PING_PONG_0_WR_PTR				0x00010000
+#define MDP5_IRQ_PING_PONG_1_WR_PTR				0x00020000
+#define MDP5_IRQ_PING_PONG_2_WR_PTR				0x00040000
+#define MDP5_IRQ_PING_PONG_3_WR_PTR				0x00080000
+#define MDP5_IRQ_PING_PONG_0_AUTO_REF				0x00100000
+#define MDP5_IRQ_PING_PONG_1_AUTO_REF				0x00200000
+#define MDP5_IRQ_PING_PONG_2_AUTO_REF				0x00400000
+#define MDP5_IRQ_PING_PONG_3_AUTO_REF				0x00800000
 #define MDP5_IRQ_INTF0_UNDER_RUN				0x01000000
 #define MDP5_IRQ_INTF0_VSYNC					0x02000000
 #define MDP5_IRQ_INTF1_UNDER_RUN				0x04000000
@@ -176,136 +147,186 @@ enum mdp5_data_format {
 #define MDP5_IRQ_INTF2_VSYNC					0x20000000
 #define MDP5_IRQ_INTF3_UNDER_RUN				0x40000000
 #define MDP5_IRQ_INTF3_VSYNC					0x80000000
-#define REG_MDP5_HW_VERSION					0x00000000
+#define REG_MDSS_HW_VERSION					0x00000000
+#define MDSS_HW_VERSION_STEP__MASK				0x0000ffff
+#define MDSS_HW_VERSION_STEP__SHIFT				0
+static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
+{
+	return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
+}
+#define MDSS_HW_VERSION_MINOR__MASK				0x0fff0000
+#define MDSS_HW_VERSION_MINOR__SHIFT				16
+static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
+{
+	return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
+}
+#define MDSS_HW_VERSION_MAJOR__MASK				0xf0000000
+#define MDSS_HW_VERSION_MAJOR__SHIFT				28
+static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
+}
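
These generated helpers shift a field value into position and mask it; reading a field back out of a raw register value is the inverse (mask first, then shift down). A standalone example using the MAJOR/MINOR/STEP masks defined above (the v1.3.0 value is illustrative):

#include <stdio.h>
#include <stdint.h>

#define STEP__MASK	0x0000ffffu
#define STEP__SHIFT	0
#define MINOR__MASK	0x0fff0000u
#define MINOR__SHIFT	16
#define MAJOR__MASK	0xf0000000u
#define MAJOR__SHIFT	28

static uint32_t pack(uint32_t major, uint32_t minor, uint32_t step)
{
	return ((major << MAJOR__SHIFT) & MAJOR__MASK) |
	       ((minor << MINOR__SHIFT) & MINOR__MASK) |
	       ((step << STEP__SHIFT) & STEP__MASK);
}

int main(void)
{
	uint32_t v = pack(1, 3, 0);	/* e.g. MDSS v1.3.0 -> 0x10030000 */

	printf("raw=0x%08x major=%u minor=%u step=%u\n", v,
	       (v & MAJOR__MASK) >> MAJOR__SHIFT,
	       (v & MINOR__MASK) >> MINOR__SHIFT,
	       (v & STEP__MASK) >> STEP__SHIFT);
	return 0;
}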
+
+#define REG_MDSS_HW_INTR_STATUS					0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_MDP				0x00000001
+#define MDSS_HW_INTR_STATUS_INTR_DSI0				0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_DSI1				0x00000020
+#define MDSS_HW_INTR_STATUS_INTR_HDMI				0x00000100
+#define MDSS_HW_INTR_STATUS_INTR_EDP				0x00001000
 
-#define REG_MDP5_HW_INTR_STATUS					0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_MDP				0x00000001
-#define MDP5_HW_INTR_STATUS_INTR_DSI0				0x00000010
-#define MDP5_HW_INTR_STATUS_INTR_DSI1				0x00000020
-#define MDP5_HW_INTR_STATUS_INTR_HDMI				0x00000100
-#define MDP5_HW_INTR_STATUS_INTR_EDP				0x00001000
+static inline uint32_t __offset_MDP(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return (mdp5_cfg->mdp.base[0]);
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
 
-#define REG_MDP5_MDP_VERSION					0x00000100
-#define MDP5_MDP_VERSION_MINOR__MASK				0x00ff0000
-#define MDP5_MDP_VERSION_MINOR__SHIFT				16
-static inline uint32_t MDP5_MDP_VERSION_MINOR(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
+#define MDP5_MDP_HW_VERSION_STEP__MASK				0x0000ffff
+#define MDP5_MDP_HW_VERSION_STEP__SHIFT				0
+static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
 {
-	return ((val) << MDP5_MDP_VERSION_MINOR__SHIFT) & MDP5_MDP_VERSION_MINOR__MASK;
+	return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
 }
-#define MDP5_MDP_VERSION_MAJOR__MASK				0xf0000000
-#define MDP5_MDP_VERSION_MAJOR__SHIFT				28
-static inline uint32_t MDP5_MDP_VERSION_MAJOR(uint32_t val)
+#define MDP5_MDP_HW_VERSION_MINOR__MASK				0x0fff0000
+#define MDP5_MDP_HW_VERSION_MINOR__SHIFT			16
+static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
 {
-	return ((val) << MDP5_MDP_VERSION_MAJOR__SHIFT) & MDP5_MDP_VERSION_MAJOR__MASK;
+	return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+}
+#define MDP5_MDP_HW_VERSION_MAJOR__MASK				0xf0000000
+#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT			28
+static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+{
+	return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
 }
 
-#define REG_MDP5_DISP_INTF_SEL					0x00000104
-#define MDP5_DISP_INTF_SEL_INTF0__MASK				0x000000ff
-#define MDP5_DISP_INTF_SEL_INTF0__SHIFT				0
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf val)
+static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK			0x000000ff
+#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT			0
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
 }
-#define MDP5_DISP_INTF_SEL_INTF1__MASK				0x0000ff00
-#define MDP5_DISP_INTF_SEL_INTF1__SHIFT				8
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK			0x0000ff00
+#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT			8
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
 }
-#define MDP5_DISP_INTF_SEL_INTF2__MASK				0x00ff0000
-#define MDP5_DISP_INTF_SEL_INTF2__SHIFT				16
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK			0x00ff0000
+#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT			16
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
 }
-#define MDP5_DISP_INTF_SEL_INTF3__MASK				0xff000000
-#define MDP5_DISP_INTF_SEL_INTF3__SHIFT				24
-static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf val)
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK			0xff000000
+#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT			24
+static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
 {
-	return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+	return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
 }
 
-#define REG_MDP5_INTR_EN					0x00000110
+static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
 
-#define REG_MDP5_INTR_STATUS					0x00000114
+static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
 
-#define REG_MDP5_INTR_CLEAR					0x00000118
+static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
 
-#define REG_MDP5_HIST_INTR_EN					0x0000011c
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
 
-#define REG_MDP5_HIST_INTR_STATUS				0x00000120
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
 
-#define REG_MDP5_HIST_INTR_CLEAR				0x00000124
+static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
 
-static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000180 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
+#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN		0x00000001
 
-static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000180 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK			0x000000ff
-#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT			0
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK			0x000000ff
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT			0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
 {
-	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+	return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
 }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK			0x0000ff00
-#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT			8
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK			0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT			8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
 {
-	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+	return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
 }
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK			0x00ff0000
-#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT			16
-static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK			0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT			16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
 {
-	return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+	return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
 }
 
-static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000230 + 0x4*i0; }
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
 
-static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000230 + 0x4*i0; }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK			0x000000ff
-#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT			0
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(enum mdp5_client_id val)
+static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK			0x000000ff
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT			0
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
 {
-	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+	return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
 }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK			0x0000ff00
-#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT			8
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK			0x0000ff00
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT			8
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
 {
-	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+	return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
 }
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK			0x00ff0000
-#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT			16
-static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(enum mdp5_client_id val)
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK			0x00ff0000
+#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT			16
+static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
 {
-	return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+	return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
 }
 
 static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
 {
 	switch (idx) {
-		case IGC_VIG: return 0x00000300;
-		case IGC_RGB: return 0x00000310;
-		case IGC_DMA: return 0x00000320;
-		case IGC_DSPP: return 0x00000400;
+		case IGC_VIG: return 0x00000200;
+		case IGC_RGB: return 0x00000210;
+		case IGC_DMA: return 0x00000220;
+		case IGC_DSPP: return 0x00000300;
 		default: return INVALID_IDX(idx);
 	}
 }
-static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
 
-static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
 
-static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-#define MDP5_IGC_LUT_REG_VAL__MASK				0x00000fff
-#define MDP5_IGC_LUT_REG_VAL__SHIFT				0
-static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+#define MDP5_MDP_IGC_LUT_REG_VAL__MASK				0x00000fff
+#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT				0
+static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
 {
-	return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+	return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
 }
-#define MDP5_IGC_LUT_REG_INDEX_UPDATE				0x02000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0				0x10000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1				0x20000000
-#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2				0x40000000
+#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE			0x02000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0			0x10000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1			0x20000000
+#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2			0x40000000
+
+#define REG_MDP5_SPLIT_DPL_EN					0x000003f4
+
+#define REG_MDP5_SPLIT_DPL_UPPER				0x000003f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL			0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN		0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX			0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX			0x00000100
+
+#define REG_MDP5_SPLIT_DPL_LOWER				0x000004f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL			0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN		0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC			0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC			0x00000100
 
 static inline uint32_t __offset_CTL(uint32_t idx)
 {
@@ -437,11 +458,19 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __o
 #define MDP5_CTL_FLUSH_DSPP0					0x00002000
 #define MDP5_CTL_FLUSH_DSPP1					0x00004000
 #define MDP5_CTL_FLUSH_DSPP2					0x00008000
+#define MDP5_CTL_FLUSH_WB					0x00010000
 #define MDP5_CTL_FLUSH_CTL					0x00020000
 #define MDP5_CTL_FLUSH_VIG3					0x00040000
 #define MDP5_CTL_FLUSH_RGB3					0x00080000
 #define MDP5_CTL_FLUSH_LM5					0x00100000
 #define MDP5_CTL_FLUSH_DSPP3					0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0					0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1					0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0				0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3					0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2					0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1					0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0					0x80000000
 
 static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
 
@@ -1117,6 +1146,94 @@ static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc
 
 static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
 
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return (mdp5_cfg->pp.base[0]);
+		case 1: return (mdp5_cfg->pp.base[1]);
+		case 2: return (mdp5_cfg->pp.base[2]);
+		case 3: return (mdp5_cfg->pp.base[3]);
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
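
Each PP instance lives at a SoC-specific base offset that the real header fetches from mdp5_cfg; the REG_MDP5_PP_* helpers then add a fixed field offset inside the block. A standalone sketch with the msm8x74 .pp.base values from the config table later in this diff:

#include <stdio.h>
#include <stdint.h>

/* Mock of the per-SoC data the real header reaches via mdp5_cfg;
 * these are the msm8x74 .pp.base values from mdp5_cfg.c. */
static const uint32_t pp_base[] = { 0x12d00, 0x12e00, 0x12f00 };

static uint32_t reg_pp_sync_config_vsync(uint32_t i)
{
	return pp_base[i] + 0x00000004;	/* field offset inside one PP block */
}

int main(void)
{
	/* PP1's SYNC_CONFIG_VSYNC register: 0x12e00 + 0x4 = 0x12e04 */
	printf("0x%05x\n", (unsigned)reg_pp_sync_config_vsync(1));
	return 0;
}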
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK			0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT			0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN			0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN				0x00100000
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK			0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT			0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK			0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT			16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK			0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT			0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK			0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT		16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK				0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT			0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK			0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT			16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
 static inline uint32_t __offset_INTF(uint32_t idx)
 {
 	switch (idx) {

+ 97 - 5
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,23 @@ const struct mdp5_cfg_hw *mdp5_cfg = NULL;
 
 const struct mdp5_cfg_hw msm8x74_config = {
 	.name = "msm8x74",
+	.mdp = {
+		.count = 1,
+		.base = { 0x00100 },
+	},
 	.smp = {
 		.mmb_count = 22,
 		.mmb_size = 4096,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4, [SSPP_VIG2] =  7,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+		},
 	},
 	.ctl = {
 		.count = 5,
 		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x0003ffff,
 	},
 	.pipe_vig = {
 		.count = 3,
@@ -57,27 +67,49 @@ const struct mdp5_cfg_hw msm8x74_config = {
 		.count = 2,
 		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
 	},
+	.pp = {
+		.count = 3,
+		.base = { 0x12d00, 0x12e00, 0x12f00 },
+	},
 	.intf = {
 		.count = 4,
 		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
 	},
+	.intfs = {
+		[0] = INTF_eDP,
+		[1] = INTF_DSI,
+		[2] = INTF_DSI,
+		[3] = INTF_HDMI,
+	},
 	.max_clk = 200000000,
 };
 
 const struct mdp5_cfg_hw apq8084_config = {
 	.name = "apq8084",
+	.mdp = {
+		.count = 1,
+		.base = { 0x00100 },
+	},
 	.smp = {
 		.mmb_count = 44,
 		.mmb_size = 8192,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4,
+			[SSPP_VIG2] =  7, [SSPP_VIG3] = 19,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+			[SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+		},
 		.reserved_state[0] = GENMASK(7, 0),	/* first 8 MMBs */
-		.reserved[CID_RGB0] = 2,
-		.reserved[CID_RGB1] = 2,
-		.reserved[CID_RGB2] = 2,
-		.reserved[CID_RGB3] = 2,
+		.reserved = {
+			/* Two SMP blocks are statically tied to RGB pipes: */
+			[16] = 2, [17] = 2, [18] = 2, [22] = 2,
+		},
 	},
 	.ctl = {
 		.count = 5,
 		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x003fffff,
 	},
 	.pipe_vig = {
 		.count = 4,
@@ -105,10 +137,69 @@ const struct mdp5_cfg_hw apq8084_config = {
 		.count = 3,
 		.base = { 0x13500, 0x13700, 0x13900 },
 	},
+	.pp = {
+		.count = 4,
+		.base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+	},
 	.intf = {
 		.count = 5,
 		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
 	},
+	.intfs = {
+		[0] = INTF_eDP,
+		[1] = INTF_DSI,
+		[2] = INTF_DSI,
+		[3] = INTF_HDMI,
+	},
+	.max_clk = 320000000,
+};
+
+const struct mdp5_cfg_hw msm8x16_config = {
+	.name = "msm8x16",
+	.mdp = {
+		.count = 1,
+		.base = { 0x01000 },
+	},
+	.smp = {
+		.mmb_count = 8,
+		.mmb_size = 8192,
+		.clients = {
+			[SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+			[SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+		.flush_hw_mask = 0x4003ffff,
+	},
+	.pipe_vig = {
+		.count = 1,
+		.base = { 0x05000 },
+	},
+	.pipe_rgb = {
+		.count = 2,
+		.base = { 0x15000, 0x17000 },
+	},
+	.pipe_dma = {
+		.count = 1,
+		.base = { 0x25000 },
+	},
+	.lm = {
+		.count = 2, /* LM0 and LM3 */
+		.base = { 0x45000, 0x48000 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 1,
+		.base = { 0x55000 },
+	},
+	.intf = {
+		.count = 1, /* INTF_1 */
+		.base = { 0x6B800 },
+	},
+	/* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
 	.max_clk = 320000000,
 };
 
@@ -116,6 +207,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
 	{ .revision = 0, .config = { .hw = &msm8x74_config } },
 	{ .revision = 2, .config = { .hw = &msm8x74_config } },
 	{ .revision = 3, .config = { .hw = &apq8084_config } },
+	{ .revision = 6, .config = { .hw = &msm8x16_config } },
 };
 
 

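The cfg_handlers[] table above is what ties a probed MDP5 hardware revision to one of the per-SoC configs; note the new revision 6 entry selecting msm8x16_config. A sketch of the lookup this table enables (the helper name here is hypothetical, not the driver's actual function):

static const struct mdp5_cfg_hw *find_hw_cfg(int revision)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++)
		if (cfg_handlers[i].revision == revision)
			return cfg_handlers[i].config.hw;

	return NULL;	/* unknown minor revision: probe should fail */
}

Keeping everything keyed off one table means adding support for a new SoC is a data-only change, as this hunk demonstrates.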
+ 17 - 1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h

@@ -44,26 +44,38 @@ struct mdp5_lm_block {
 	uint32_t nb_stages;		/* number of stages per blender */
 };
 
+struct mdp5_ctl_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t flush_hw_mask;		/* FLUSH register's hardware mask */
+};
+
 struct mdp5_smp_block {
 	int mmb_count;			/* number of SMP MMBs */
 	int mmb_size;			/* MMB: size in bytes */
+	uint32_t clients[MAX_CLIENTS];	/* SMP port allocation per pipe */
 	mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
 	int reserved[MAX_CLIENTS];	/* # of MMBs allocated per client */
 };
 
+#define MDP5_INTF_NUM_MAX	5
+
 struct mdp5_cfg_hw {
 	char  *name;
 
+	struct mdp5_sub_block mdp;
 	struct mdp5_smp_block smp;
-	struct mdp5_sub_block ctl;
+	struct mdp5_ctl_block ctl;
 	struct mdp5_sub_block pipe_vig;
 	struct mdp5_sub_block pipe_rgb;
 	struct mdp5_sub_block pipe_dma;
 	struct mdp5_lm_block  lm;
 	struct mdp5_sub_block dspp;
 	struct mdp5_sub_block ad;
+	struct mdp5_sub_block pp;
 	struct mdp5_sub_block intf;
 
+	u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+
 	uint32_t max_clk;
 };
 
@@ -84,6 +96,10 @@ const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hn
 struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
 int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
 
+#define mdp5_cfg_intf_is_virtual(intf_type) ({	\
+	typeof(intf_type) __val = (intf_type);	\
+	(__val) >= INTF_VIRTUAL; })
+
 struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
 		uint32_t major, uint32_t minor);
 void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);

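The mdp5_cfg_intf_is_virtual() helper added above uses a GCC statement expression so its argument is evaluated exactly once, making it safe for arguments with side effects. A usage sketch, assuming INTF_DISABLED is the zero entry of enum mdp5_intf_type as in the generated mdp5 headers:

static int count_physical_intfs(const struct mdp5_cfg_hw *hw_cfg)
{
	int i, n = 0;

	for (i = 0; i < MDP5_INTF_NUM_MAX; i++)
		/* skip unused slots and virtual interfaces (e.g. writeback) */
		if (hw_cfg->intfs[i] != INTF_DISABLED &&
		    !mdp5_cfg_intf_is_virtual(hw_cfg->intfs[i]))
			n++;

	return n;
}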
Too many files were changed in this diff, so some files are not shown.