
Merge tag 'drm-intel-next-2013-11-29' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

- some more ppgtt prep patches from Ben
- a few fbc fixes from Ville
- power well rework from Imre
- vlv forcewake improvements from Deepak S, Ville and Jesse
- a few smaller things all over

[airlied: fixup forcewake conflict]
* tag 'drm-intel-next-2013-11-29' of git://people.freedesktop.org/~danvet/drm-intel: (97 commits)
  drm/i915: Fix port name in vlv_wait_port_ready() timeout warning
  drm/i915: Return a drm_mode_status enum in the mode_valid vfuncs
  drm/i915: add intel_display_power_enabled_sw() for use in atomic ctx
  drm/i915: drop DRM_ERROR in intel_fbdev init
  drm/i915/vlv: use parallel context restore when coming out of RC6
  drm/i915/vlv: use a lower RC6 timeout on VLV
  drm/i915/sdvo: Fix up debug output to not split lines
  drm/i915: make sparse happy for the new vlv mmio read function
  drm/i915: drop the right force-wake engine in the vlv mmio funcs
  drm/i915: Fix GT wake FIFO free entries for VLV
  drm/i915: Report all GTFIFODBG errors
  drm/i915: Enabling DebugFS for valleyview forcewake counts
  drm/i915/vlv: Valleyview support for forcewake Individual power wells.
  drm/i915: Add power well arguments to force wake routines.
  drm/i915: Do not attempt to re-enable an unconnected primary plane
  drm/i915: add a debugfs entry for power domain info
  drm/i915: add a default always-on power well
  drm/i915: don't do BDW/HSW specific powerdomains init on other platforms
  drm/i915: protect HSW power well check with IS_HASWELL in redisable_vga
  drm/i915: use IS_HASWELL/BROADWELL instead of HAS_POWER_WELL
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
Dave Airlie 11 years ago
parent
commit
da32cc90cb
40 changed files with 2088 additions and 1184 deletions
  1. drivers/char/Makefile (+1 -1)
  2. drivers/char/agp/Kconfig (+5 -0)
  3. drivers/char/agp/Makefile (+1 -1)
  4. drivers/char/agp/intel-agp.c (+0 -5)
  5. drivers/char/agp/intel-gtt.c (+18 -0)
  6. drivers/gpu/drm/i915/Kconfig (+24 -8)
  7. drivers/gpu/drm/i915/dvo_ns2501.c (+4 -69)
  8. drivers/gpu/drm/i915/i915_debugfs.c (+121 -42)
  9. drivers/gpu/drm/i915/i915_dma.c (+9 -13)
  10. drivers/gpu/drm/i915/i915_drv.c (+20 -22)
  11. drivers/gpu/drm/i915/i915_drv.h (+98 -45)
  12. drivers/gpu/drm/i915/i915_gem.c (+7 -1)
  13. drivers/gpu/drm/i915/i915_gem_context.c (+20 -25)
  14. drivers/gpu/drm/i915/i915_gem_execbuffer.c (+5 -6)
  15. drivers/gpu/drm/i915/i915_gem_gtt.c (+22 -4)
  16. drivers/gpu/drm/i915/i915_irq.c (+11 -6)
  17. drivers/gpu/drm/i915/i915_reg.h (+114 -103)
  18. drivers/gpu/drm/i915/i915_suspend.c (+0 -45)
  19. drivers/gpu/drm/i915/i915_sysfs.c (+17 -18)
  20. drivers/gpu/drm/i915/i915_ums.c (+27 -0)
  21. drivers/gpu/drm/i915/intel_crt.c (+3 -2)
  22. drivers/gpu/drm/i915/intel_ddi.c (+22 -15)
  23. drivers/gpu/drm/i915/intel_display.c (+256 -63)
  24. drivers/gpu/drm/i915/intel_dp.c (+22 -22)
  25. drivers/gpu/drm/i915/intel_drv.h (+20 -5)
  26. drivers/gpu/drm/i915/intel_dsi.c (+3 -2)
  27. drivers/gpu/drm/i915/intel_dvo.c (+3 -2)
  28. drivers/gpu/drm/i915/intel_fbdev.c (+47 -16)
  29. drivers/gpu/drm/i915/intel_hdmi.c (+29 -38)
  30. drivers/gpu/drm/i915/intel_i2c.c (+3 -12)
  31. drivers/gpu/drm/i915/intel_lvds.c (+3 -2)
  32. drivers/gpu/drm/i915/intel_opregion.c (+13 -30)
  33. drivers/gpu/drm/i915/intel_panel.c (+594 -288)
  34. drivers/gpu/drm/i915/intel_pm.c (+224 -172)
  35. drivers/gpu/drm/i915/intel_ringbuffer.c (+9 -13)
  36. drivers/gpu/drm/i915/intel_sdvo.c (+40 -20)
  37. drivers/gpu/drm/i915/intel_sideband.c (+18 -11)
  38. drivers/gpu/drm/i915/intel_sprite.c (+18 -0)
  39. drivers/gpu/drm/i915/intel_uncore.c (+218 -57)
  40. include/uapi/drm/i915_drm.h (+19 -0)

+ 1 - 1
drivers/char/Makefile

@@ -50,7 +50,7 @@ obj-$(CONFIG_GPIO_TB0219)	+= tb0219.o
 obj-$(CONFIG_TELCLOCK)		+= tlclk.o
 
 obj-$(CONFIG_MWAVE)		+= mwave/
-obj-$(CONFIG_AGP)		+= agp/
+obj-y				+= agp/
 obj-$(CONFIG_PCMCIA)		+= pcmcia/
 
 obj-$(CONFIG_HANGCHECK_TIMER)	+= hangcheck-timer.o

+ 5 - 0
drivers/char/agp/Kconfig

@@ -68,6 +68,7 @@ config AGP_AMD64
 config AGP_INTEL
 	tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
 	depends on AGP && X86
+	select INTEL_GTT
 	help
 	  This option gives you AGP support for the GLX component of X
 	  on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
@@ -155,3 +156,7 @@ config AGP_SGI_TIOCA
           This option gives you AGP GART support for the SGI TIO chipset
           for IA64 processors.
 
+config INTEL_GTT
+	tristate
+	depends on X86 && PCI
+

+ 1 - 1
drivers/char/agp/Makefile

@@ -13,7 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
-obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
+obj-$(CONFIG_INTEL_GTT)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o

+ 0 - 5
drivers/char/agp/intel-agp.c

@@ -14,9 +14,6 @@
 #include "intel-agp.h"
 #include <drm/intel-gtt.h>
 
-int intel_agp_enabled;
-EXPORT_SYMBOL(intel_agp_enabled);
-
 static int intel_fetch_size(void)
 {
 	int i;
@@ -814,8 +811,6 @@ static int agp_intel_probe(struct pci_dev *pdev,
 found_gmch:
 	pci_set_drvdata(pdev, bridge);
 	err = agp_add_bridge(bridge);
-	if (!err)
-		intel_agp_enabled = 1;
 	return err;
 }
 

+ 18 - 0
drivers/char/agp/intel-gtt.c

@@ -94,6 +94,7 @@ static struct _intel_private {
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_gtt_map_memory(struct page **pages,
 				unsigned int num_entries,
 				struct sg_table *st)
@@ -168,6 +169,7 @@ static void i8xx_destroy_pages(struct page *page)
 	__free_pages(page, 2);
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
+#endif
 
 #define I810_GTT_ORDER 4
 static int i810_setup(void)
@@ -209,6 +211,7 @@ static void i810_cleanup(void)
 	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
 				      int type)
 {
@@ -289,6 +292,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 	}
 	kfree(curr);
 }
+#endif
 
 static int intel_gtt_setup_scratch_page(void)
 {
@@ -647,7 +651,9 @@ static int intel_gtt_init(void)
 		return -ENOMEM;
 	}
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 	global_cache_flush();   /* FIXME: ? */
+#endif
 
 	intel_private.stolen_size = intel_gtt_stolen_size();
 
@@ -671,6 +677,7 @@ static int intel_gtt_init(void)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_fetch_size(void)
 {
 	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
@@ -689,6 +696,7 @@ static int intel_fake_agp_fetch_size(void)
 
 	return 0;
 }
+#endif
 
 static void i830_cleanup(void)
 {
@@ -801,6 +809,7 @@ static int i830_setup(void)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
 {
 	agp_bridge->gatt_table_real = NULL;
@@ -825,6 +834,7 @@ static int intel_fake_agp_configure(void)
 
 	return 0;
 }
+#endif
 
 static bool i830_check_flags(unsigned int flags)
 {
@@ -863,6 +873,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static void intel_gtt_insert_pages(unsigned int first_entry,
 				   unsigned int num_entries,
 				   struct page **pages,
@@ -928,6 +939,7 @@ out_err:
 	mem->is_flushed = true;
 	return ret;
 }
+#endif
 
 void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 {
@@ -941,6 +953,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 }
 EXPORT_SYMBOL(intel_gtt_clear_range);
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
 {
@@ -982,6 +995,7 @@ static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
 	/* always return NULL for other allocation types for now */
 	return NULL;
 }
+#endif
 
 static int intel_alloc_chipset_flush_resource(void)
 {
@@ -1138,6 +1152,7 @@ static int i9xx_setup(void)
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.owner			= THIS_MODULE,
 	.size_type		= FIXED_APER_SIZE,
@@ -1159,6 +1174,7 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.agp_destroy_page	= agp_generic_destroy_page,
 	.agp_destroy_pages      = agp_generic_destroy_pages,
 };
+#endif
 
 static const struct intel_gtt_driver i81x_gtt_driver = {
 	.gen = 1,
@@ -1376,11 +1392,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
 	intel_private.refcount++;
 
+#if IS_ENABLED(CONFIG_AGP_INTEL)
 	if (bridge) {
 		bridge->driver = &intel_fake_agp_driver;
 		bridge->dev_private_data = &intel_private;
 		bridge->dev = bridge_pdev;
 	}
+#endif
 
 	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
 

+ 24 - 8
drivers/gpu/drm/i915/Kconfig

@@ -1,8 +1,10 @@
 config DRM_I915
 	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
 	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
+	depends on X86 && PCI
+	depends on (AGP || AGP=n)
+	select INTEL_GTT
+	select AGP_INTEL if AGP
 	# we need shmfs for the swappable backing store, and in particular
 	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
@@ -35,15 +37,14 @@ config DRM_I915
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
 	depends on DRM_I915
+	default y
 	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.  Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
+	  Choose this option if you want kernel modesetting enabled by default.
+
+	  If in doubt, say "Y".
 
 config DRM_I915_FBDEV
-	bool "Enable legacy fbdev support for the modesettting intel driver"
+	bool "Enable legacy fbdev support for the modesetting intel driver"
 	depends on DRM_I915
 	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
@@ -55,9 +56,12 @@ config DRM_I915_FBDEV
 	  support. Note that this support also provide the linux console
 	  support on top of the intel modesetting driver.
 
+	  If in doubt, say "Y".
+
 config DRM_I915_PRELIMINARY_HW_SUPPORT
 	bool "Enable preliminary support for prerelease Intel hardware by default"
 	depends on DRM_I915
+	default n
 	help
 	  Choose this option if you have prerelease Intel hardware and want the
 	  i915 driver to support it by default.  You can enable such support at
@@ -65,3 +69,15 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
 	  option changes the default for that module option.
 
 	  If in doubt, say "N".
+
+config DRM_I915_UMS
+	bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option if you still need userspace modesetting.
+
+	  Userspace modesetting is deprecated for quite some time now, so
+	  enable this only if you have ancient versions of the DDX drivers.
+
+	  If in doubt, say "N".

+ 4 - 69
drivers/gpu/drm/i915/dvo_ns2501.c

@@ -87,49 +87,6 @@ struct ns2501_priv {
  * when switching the resolution.
  */
 
-static void enable_dvo(struct intel_dvo_device *dvo)
-{
-	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_gmbus *bus = container_of(adapter,
-					       struct intel_gmbus,
-					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
-
-	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
-
-	ns->dvoc = I915_READ(DVO_C);
-	ns->pll_a = I915_READ(_DPLL_A);
-	ns->srcdim = I915_READ(DVOC_SRCDIM);
-	ns->fw_blc = I915_READ(FW_BLC);
-
-	I915_WRITE(DVOC, 0x10004084);
-	I915_WRITE(_DPLL_A, 0xd0820000);
-	I915_WRITE(DVOC_SRCDIM, 0x400300);	// 1024x768
-	I915_WRITE(FW_BLC, 0x1080304);
-
-	I915_WRITE(DVOC, 0x90004084);
-}
-
-/*
- * Restore the I915 registers modified by the above
- * trigger function.
- */
-static void restore_dvo(struct intel_dvo_device *dvo)
-{
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_gmbus *bus = container_of(adapter,
-					       struct intel_gmbus,
-					       adapter);
-	struct drm_i915_private *dev_priv = bus->dev_priv;
-	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-
-	I915_WRITE(DVOC, ns->dvoc);
-	I915_WRITE(_DPLL_A, ns->pll_a);
-	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
-	I915_WRITE(FW_BLC, ns->fw_blc);
-}
-
 /*
 ** Read a register from the ns2501.
 ** Returns true if successful, false otherwise.
@@ -300,7 +257,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 			    struct drm_display_mode *adjusted_mode)
 {
 	bool ok;
-	bool restore = false;
+	int retries = 10;
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
 	DRM_DEBUG_KMS
@@ -476,20 +433,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
 			ns->reg_8_shadow |= NS2501_8_BPAS;
 		}
 		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
-
-		if (!ok) {
-			if (restore)
-				restore_dvo(dvo);
-			enable_dvo(dvo);
-			restore = true;
-		}
-	} while (!ok);
-	/*
-	 * Restore the old i915 registers before
-	 * forcing the ns2501 on.
-	 */
-	if (restore)
-		restore_dvo(dvo);
+	} while (!ok && retries--);
 }
 
 /* set the NS2501 power state */
@@ -510,7 +454,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
 static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	bool ok;
-	bool restore = false;
+	int retries = 10;
 	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 	unsigned char ch;
 
@@ -537,16 +481,7 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 			ok &=
 			    ns2501_writeb(dvo, 0x35,
 					  enable ? 0xff : 0x00);
-			if (!ok) {
-				if (restore)
-					restore_dvo(dvo);
-				enable_dvo(dvo);
-				restore = true;
-			}
-		} while (!ok);
-
-		if (restore)
-			restore_dvo(dvo);
+		} while (!ok && retries--);
 	}
 }
 

+ 121 - 42
drivers/gpu/drm/i915/i915_debugfs.c

@@ -947,7 +947,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 		if (ret)
 			return ret;
 
-		gen6_gt_force_wake_get(dev_priv);
+		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
 		reqf &= ~GEN6_TURBO_DISABLE;
@@ -970,7 +970,7 @@
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
 		cagf *= GT_FREQUENCY_MULTIPLIER;
 
-		gen6_gt_force_wake_put(dev_priv);
+		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1018,17 +1018,16 @@
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
+		val = valleyview_rps_max_freq(dev_priv);
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv->mem_freq, val));
+			   vlv_gpu_freq(dev_priv, val));
 
-		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
+		val = valleyview_rps_min_freq(dev_priv);
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv->mem_freq, val));
+			   vlv_gpu_freq(dev_priv, val));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv->mem_freq,
-					(freq_sts >> 8) & 0xff));
+			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
 		seq_puts(m, "no P-state info available\n");
@@ -1565,13 +1564,21 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned forcewake_count;
+	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
 
 	spin_lock_irq(&dev_priv->uncore.lock);
-	forcewake_count = dev_priv->uncore.forcewake_count;
+	if (IS_VALLEYVIEW(dev)) {
+		fw_rendercount = dev_priv->uncore.fw_rendercount;
+		fw_mediacount = dev_priv->uncore.fw_mediacount;
+	} else
+		forcewake_count = dev_priv->uncore.forcewake_count;
 	spin_unlock_irq(&dev_priv->uncore.lock);
 
-	seq_printf(m, "forcewake count = %u\n", forcewake_count);
+	if (IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
+		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
+	} else
+		seq_printf(m, "forcewake count = %u\n", forcewake_count);
 
 	return 0;
 }
@@ -1735,28 +1742,28 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
-	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
-	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
+	seq_printf(m, "DPIO PLL DW3 CH0 : 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(0)));
+	seq_printf(m, "DPIO PLL DW3 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW3(1)));
 
-	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
-	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
+	seq_printf(m, "DPIO PLL DW5 CH0: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(0)));
+	seq_printf(m, "DPIO PLL DW5 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW5(1)));
 
-	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
-	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
+	seq_printf(m, "DPIO PLL DW7 CH0: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(0)));
+	seq_printf(m, "DPIO PLL DW7 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW7(1)));
 
-	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
-	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
+	seq_printf(m, "DPIO PLL DW10 CH0: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(0)));
+	seq_printf(m, "DPIO PLL DW10 CH1: 0x%08x\n",
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_PLL_DW10(1)));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, PIPE_A, VLV_CMN_DW0));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1845,6 +1852,76 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static const char *power_domain_str(enum intel_display_power_domain domain)
+{
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+		return "PIPE_A";
+	case POWER_DOMAIN_PIPE_B:
+		return "PIPE_B";
+	case POWER_DOMAIN_PIPE_C:
+		return "PIPE_C";
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+		return "PIPE_A_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+		return "PIPE_B_PANEL_FITTER";
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+		return "PIPE_C_PANEL_FITTER";
+	case POWER_DOMAIN_TRANSCODER_A:
+		return "TRANSCODER_A";
+	case POWER_DOMAIN_TRANSCODER_B:
+		return "TRANSCODER_B";
+	case POWER_DOMAIN_TRANSCODER_C:
+		return "TRANSCODER_C";
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return "TRANSCODER_EDP";
+	case POWER_DOMAIN_VGA:
+		return "VGA";
+	case POWER_DOMAIN_AUDIO:
+		return "AUDIO";
+	case POWER_DOMAIN_INIT:
+		return "INIT";
+	default:
+		WARN_ON(1);
+		return "?";
+	}
+}
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	int i;
+
+	mutex_lock(&power_domains->lock);
+
+	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+	for (i = 0; i < power_domains->power_well_count; i++) {
+		struct i915_power_well *power_well;
+		enum intel_display_power_domain power_domain;
+
+		power_well = &power_domains->power_wells[i];
+		seq_printf(m, "%-25s %d\n", power_well->name,
+			   power_well->count);
+
+		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
+		     power_domain++) {
+			if (!(BIT(power_domain) & power_well->domains))
+				continue;
+
+			seq_printf(m, "  %-23s %d\n",
+				 power_domain_str(power_domain),
+				 power_domains->domain_use_count[power_domain]);
+		}
+	}
+
+	mutex_unlock(&power_domains->lock);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -1857,6 +1934,9 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 	struct drm_i915_private *dev_priv = info->dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
+	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+		return -ENODEV;
+
 	spin_lock_irq(&pipe_crc->lock);
 
 	if (pipe_crc->opened) {
@@ -2347,7 +2427,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-	u32 val;
+	u32 val = 0; /* shut up gcc */
 	int ret;
 
 	if (pipe_crc->source == source)
@@ -2742,7 +2822,7 @@ i915_drop_caches_set(void *data, u64 val)
 	struct i915_vma *vma, *x;
 	int ret;
 
-	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
 
 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
 	 * on ioctls on -EAGAIN. */
@@ -2810,8 +2890,7 @@ i915_max_freq_get(void *data, u64 *val)
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv->mem_freq,
-				    dev_priv->rps.max_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
 	else
 		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2841,9 +2920,9 @@ i915_max_freq_set(void *data, u64 val)
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
 	if (IS_VALLEYVIEW(dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 		dev_priv->rps.max_delay = val;
-		gen6_set_rps(dev, val);
+		valleyview_set_rps(dev, val);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 		dev_priv->rps.max_delay = val;
@@ -2876,8 +2955,7 @@ i915_min_freq_get(void *data, u64 *val)
 		return ret;
 
 	if (IS_VALLEYVIEW(dev))
-		*val = vlv_gpu_freq(dev_priv->mem_freq,
-				    dev_priv->rps.min_delay);
+		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
 	else
 		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -2907,7 +2985,7 @@ i915_min_freq_set(void *data, u64 val)
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
 	if (IS_VALLEYVIEW(dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 		dev_priv->rps.min_delay = val;
 		valleyview_set_rps(dev, val);
 	} else {
@@ -2983,7 +3061,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	return 0;
 }
@@ -2996,7 +3074,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
 	return 0;
 }
@@ -3079,6 +3157,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_pc8_status", i915_pc8_status, 0},
+	{"i915_power_domain_info", i915_power_domain_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
@@ -3102,10 +3181,10 @@ static const struct i915_debugfs_files {
 void intel_display_crc_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	enum pipe pipe;
 
-	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
-		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+	for_each_pipe(pipe) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 
 		pipe_crc->opened = false;
 		spin_lock_init(&pipe_crc->lock);

+ 9 - 13
drivers/gpu/drm/i915/i915_dma.c

@@ -1486,7 +1486,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->backlight.lock);
+	spin_lock_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
@@ -1639,8 +1639,7 @@
 			goto out_gem_unload;
 	}
 
-	if (HAS_POWER_WELL(dev))
-		intel_power_domains_init(dev);
+	intel_power_domains_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
@@ -1667,8 +1666,7 @@
 	return 0;
 
 out_power_well:
-	if (HAS_POWER_WELL(dev))
-		intel_power_domains_remove(dev);
+	intel_power_domains_remove(dev);
 	drm_vblank_cleanup(dev);
 out_gem_unload:
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
@@ -1706,13 +1704,11 @@ int i915_driver_unload(struct drm_device *dev)
 
 	intel_gpu_ips_teardown();
 
-	if (HAS_POWER_WELL(dev)) {
-		/* The i915.ko module is still not prepared to be loaded when
-		 * the power well is not enabled, so just enable it in case
-		 * we're going to unload/reload. */
-		intel_display_set_init_power(dev, true);
-		intel_power_domains_remove(dev);
-	}
+	/* The i915.ko module is still not prepared to be loaded when
+	 * the power well is not enabled, so just enable it in case
+	 * we're going to unload/reload. */
+	intel_display_set_init_power(dev, true);
+	intel_power_domains_remove(dev);
 
 	i915_teardown_sysfs(dev);
 
@@ -1777,7 +1773,6 @@
 
 	list_del(&dev_priv->gtt.base.global_link);
 	WARN_ON(!list_empty(&dev_priv->vm_list));
-	drm_mm_takedown(&dev_priv->gtt.base.mm);
 
 	drm_vblank_cleanup(dev);
 
@@ -1908,6 +1903,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

+ 20 - 22
drivers/gpu/drm/i915/i915_drv.c

@@ -114,7 +114,7 @@ MODULE_PARM_DESC(enable_hangcheck,
 		"(default: true)");
 
 int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
 MODULE_PARM_DESC(i915_enable_ppgtt,
 		"Enable PPGTT (default: true)");
 
@@ -155,7 +155,6 @@ MODULE_PARM_DESC(prefault_disable,
 		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
 
 static struct drm_driver driver;
-extern int intel_agp_enabled;
 
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
@@ -265,6 +264,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
 static const struct intel_device_info intel_sandybridge_d_info = {
 	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
 };
@@ -280,6 +280,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
 	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_llc = 1
 
@@ -292,7 +293,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
-	.has_fbc = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
@@ -307,6 +307,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
 	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
 };
 
@@ -315,6 +316,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
 	.num_pipes = 2,
 	.is_valleyview = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
 };
 
@@ -332,7 +334,6 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.is_mobile = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
-	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
@@ -761,14 +762,14 @@ int i915_reset(struct drm_device *dev)
 		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
 		dev_priv->gpu_error.stop_rings = 0;
 		if (ret == -ENODEV) {
-			DRM_ERROR("Reset not implemented, but ignoring "
-				  "error for simulated gpu hangs\n");
+			DRM_INFO("Reset not implemented, but ignoring "
+				 "error for simulated gpu hangs\n");
 			ret = 0;
 		}
 	}
 
 	if (ret) {
-		DRM_ERROR("Failed to reset chip.\n");
+		DRM_ERROR("Failed to reset chip: %i\n", ret);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -789,12 +790,9 @@
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->ums.mm_suspended) {
-		bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
 		dev_priv->ums.mm_suspended = 0;
 
 		ret = i915_gem_init_hw(dev);
-		if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
-			DRM_ERROR("HW contexts didn't survive reset\n");
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -830,17 +828,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
-	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
-	 * implementation for gen3 (and only gen3) that used legacy drm maps
-	 * (gasp!) to share buffers between X and the client. Hence we need to
-	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
-	if (intel_info->gen != 3) {
-		driver.driver_features &=
-			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
-	} else if (!intel_agp_enabled) {
-		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
-		return -ENODEV;
-	}
+	driver.driver_features &= ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
 
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
@@ -1023,14 +1011,24 @@ static int __init i915_init(void)
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-	if (!(driver.driver_features & DRIVER_MODESET))
+	if (!(driver.driver_features & DRIVER_MODESET)) {
 		driver.get_vblank_timestamp = NULL;
+#ifndef CONFIG_DRM_I915_UMS
+		/* Silently fail loading to not upset userspace. */
+		return 0;
+#endif
+	}
 
 	return drm_pci_init(&driver, &i915_pci_driver);
 }
 
 static void __exit i915_exit(void)
 {
+#ifndef CONFIG_DRM_I915_UMS
+	if (!(driver.driver_features & DRIVER_MODESET))
+		return; /* Never loaded a driver. */
+#endif
+
 	drm_pci_exit(&driver, &i915_pci_driver);
 }
 

+ 98 - 45
drivers/gpu/drm/i915/i915_drv.h

@@ -89,6 +89,18 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
+#define I915_NUM_PHYS_VLV 1
+
+enum dpio_channel {
+	DPIO_CH0,
+	DPIO_CH1
+};
+
+enum dpio_phy {
+	DPIO_PHY0,
+	DPIO_PHY1
+};
+
 enum intel_display_power_domain {
 	POWER_DOMAIN_PIPE_A,
 	POWER_DOMAIN_PIPE_B,
@@ -101,6 +113,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_C,
 	POWER_DOMAIN_TRANSCODER_EDP,
 	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_AUDIO,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -351,6 +364,7 @@ struct drm_i915_error_state {
 	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
+struct intel_connector;
 struct intel_crtc_config;
 struct intel_crtc;
 struct intel_limit;
@@ -413,11 +427,20 @@ struct drm_i915_display_funcs {
 	/* render clock increase/decrease */
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
+
+	int (*setup_backlight)(struct intel_connector *connector);
+	uint32_t (*get_backlight)(struct intel_connector *connector);
+	void (*set_backlight)(struct intel_connector *connector,
+			      uint32_t level);
+	void (*disable_backlight)(struct intel_connector *connector);
+	void (*enable_backlight)(struct intel_connector *connector);
 };
 };
 
 struct intel_uncore_funcs {
-	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+	void (*force_wake_get)(struct drm_i915_private *dev_priv,
+							int fw_engine);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv,
+							int fw_engine);
 
 
 	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
 	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
@@ -442,6 +465,9 @@ struct intel_uncore {
 	unsigned fifo_count;
 	unsigned forcewake_count;
 
+	unsigned fw_mediacount;
+
 	struct delayed_work force_wake_work;
 	struct delayed_work force_wake_work;
 };
 
@@ -708,7 +734,6 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -761,8 +786,6 @@ struct i915_suspend_saved_registers {
 	u32 saveBLC_PWM_CTL;
 	u32 saveBLC_PWM_CTL2;
 	u32 saveBLC_HIST_CTL_B;
-	u32 saveBLC_PWM_CTL_B;
-	u32 saveBLC_PWM_CTL2_B;
 	u32 saveBLC_CPU_PWM_CTL;
 	u32 saveBLC_CPU_PWM_CTL2;
 	u32 saveFPB0;
@@ -932,21 +955,29 @@ struct intel_ilk_power_mgmt {
 
 /* Power well structure for haswell */
 struct i915_power_well {
+	const char *name;
+	bool always_on;
 	/* power well enable/disable usage count */
 	int count;
+	void *data;
+	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
+		    bool enable);
+	bool (*is_enabled)(struct drm_device *dev,
+			   struct i915_power_well *power_well);
 };
 };
 
-#define I915_MAX_POWER_WELLS 1
-
 struct i915_power_domains {
 	/*
 	 * Power wells needed for initialization at driver init and suspend
 	 * time are on. They are kept on until after the first modeset.
 	 */
 	bool init_power_on;
+	int power_well_count;
 
 	struct mutex lock;
-	struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
+	int domain_use_count[POWER_DOMAIN_NUM];
+	struct i915_power_well *power_wells;
 };
 
 struct i915_dri1_state {
@@ -1077,34 +1108,30 @@ struct i915_gpu_error {
 	unsigned long missed_irq_rings;
 
 	/**
-	 * State variable and reset counter controlling the reset flow
+	 * State variable controlling the reset flow and count
 	 *
-	 * wait_seqno code to race-free noticed that a reset event happened and
-	 * that it needs to restart the entire ioctl (since most likely the
-	 * seqno it waited for won't ever signal anytime soon).
+	 * This is a counter which gets incremented when reset is triggered,
+	 * and again when reset has been handled. So odd values (lowest bit set)
+	 * means that reset is in progress and even values that
+	 * (reset_counter >> 1):th reset was successfully completed.
+	 *
+	 * If reset is not completed succesfully, the I915_WEDGE bit is
+	 * set meaning that hardware is terminally sour and there is no
+	 * recovery. All waiters on the reset_queue will be woken when
+	 * that happens.
+	 *
+	 * This counter is used by the wait_seqno code to notice that reset
+	 * event happened and it needs to restart the entire ioctl (since most
+	 * likely the seqno it waited for won't ever signal anytime soon).
 	 *
 	 *
 	 * This is important for lock-free wait paths, where no contended lock
 	 * naturally enforces the correct ordering between the bail-out of the
 	 * waiter and the gpu reset work code.
-	 * Lowest bit controls the reset state machine: Set means a reset is in
-	 * progress. This state will (presuming we don't have any bugs) decay
-	 * into either unset (successful reset) or the special WEDGED value (hw
-	 * terminally sour). All waiters on the reset_queue will be woken when
-	 * that happens.
 	 */
 	 */
 	atomic_t reset_counter;
 
-	 * Special values/flags for reset_counter
-	 *
-	 * Note that the code relies on
-	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
-	 * being true.
-	 */
 #define I915_RESET_IN_PROGRESS_FLAG	1
 #define I915_RESET_IN_PROGRESS_FLAG	1
-#define I915_WEDGED			0xffffffff
+#define I915_WEDGED			(1 << 31)
 
 	/**
 	 * Waitqueue to signal when the reset has completed. Used by clients
@@ -1368,13 +1395,8 @@ typedef struct drm_i915_private {
 	struct intel_overlay *overlay;
 	unsigned int sprite_scaling_enabled;
 
-	struct {
-		int level;
-		bool enabled;
-		spinlock_t lock; /* bl registers and the above bl fields */
-		struct backlight_device *device;
-	} backlight;
+	/* backlight registers and fields in struct intel_panel */
+	spinlock_t backlight_lock;
 
 	/* LVDS info */
 	bool no_aux_handshake;
@@ -1426,6 +1448,7 @@ typedef struct drm_i915_private {
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;
+	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -1470,7 +1493,6 @@ typedef struct drm_i915_private {
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
 
-	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
 	struct list_head context_list;
 
@@ -1755,8 +1777,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+				 (((dev)->pdev->device & 0xf) == 0x2  || \
+				 ((dev)->pdev->device & 0xf) == 0x6 || \
+				 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1813,7 +1840,6 @@ struct drm_i915_file_private {
 #define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
@@ -1908,7 +1934,6 @@ extern void intel_pm_init(struct drm_device *dev);
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
 extern void intel_uncore_fini(struct drm_device *dev);
 
@@ -2060,12 +2085,17 @@ int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
 	return unlikely(atomic_read(&error->reset_counter)
-			& I915_RESET_IN_PROGRESS_FLAG);
+			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return atomic_read(&error->reset_counter) == I915_WEDGED;
+	return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
 }
 
 void i915_gem_reset(struct drm_device *dev);
@@ -2177,7 +2207,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 }
 
 /* i915_gem_context.c */
-void i915_gem_context_init(struct drm_device *dev);
+int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
@@ -2395,6 +2425,8 @@ extern int intel_enable_rc6(const struct drm_device *dev);
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
 
 /* overlay */
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -2410,8 +2442,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2426,6 +2458,8 @@ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
@@ -2435,8 +2469,27 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 		     enum intel_sbi_destination destination);
 
-int vlv_gpu_freq(int ddr_freq, int val);
-int vlv_freq_opcode(int ddr_freq, int val);
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+	(((reg) >= 0x2000 && (reg) < 0x4000) ||\
+	((reg) >= 0x5000 && (reg) < 0x8000) ||\
+	((reg) >= 0xB000 && (reg) < 0x12000) ||\
+	((reg) >= 0x2E000 && (reg) < 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
+	(((reg) >= 0x12000 && (reg) < 0x14000) ||\
+	((reg) >= 0x22000 && (reg) < 0x24000) ||\
+	((reg) >= 0x30000 && (reg) < 0x40000))
+
+#define FORCEWAKE_RENDER	(1 << 0)
+#define FORCEWAKE_MEDIA		(1 << 1)
+#define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+
 
 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
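For reference, a small stand-alone sketch of how the new range macros partition the VLV MMIO space between the two force-wake wells (the helper and the sample offsets are illustrative, not part of the patch; the range macros are copied from the hunk above):

#include <stdio.h>
#include <stdint.h>

/* Range macros copied from the hunk above. */
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) || \
	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
	 ((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(((reg) >= 0x12000 && (reg) < 0x14000) || \
	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
	 ((reg) >= 0x30000 && (reg) < 0x40000))

#define FORCEWAKE_RENDER	(1 << 0)
#define FORCEWAKE_MEDIA		(1 << 1)

/* Illustrative helper: which well (if any) an MMIO offset needs awake. */
static int fw_engine_for_reg(uint32_t reg)
{
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;
	if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;
	return 0;	/* no force wake needed for this access */
}

int main(void)
{
	printf("0x02030 -> %d\n", fw_engine_for_reg(0x02030));	/* render:  1 */
	printf("0x12080 -> %d\n", fw_engine_for_reg(0x12080));	/* media:   2 */
	printf("0xa0000 -> %d\n", fw_engine_for_reg(0xa0000));	/* neither: 0 */
	return 0;
}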

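The reset-counter arithmetic behind the new i915_reset_count() can be checked in isolation. A minimal sketch, assuming the counter is bumped once when a reset starts and once more when it completes, with the usual values I915_RESET_IN_PROGRESS_FLAG == 1 and I915_WEDGED == (1 << 31):

#include <stdio.h>
#include <stdint.h>

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1u << 31)

/* Same arithmetic as the new i915_reset_count(): the counter is incremented
 * once when a reset starts (odd = in progress) and once more when it
 * completes, with the top bit reserved for the terminally-wedged state. */
static uint32_t reset_count(uint32_t reset_counter)
{
	return ((reset_counter & ~I915_WEDGED) + 1) / 2;
}

int main(void)
{
	printf("%u\n", reset_count(0));			/* no reset yet: 0 */
	printf("%u\n", reset_count(1));			/* first reset running: 1 */
	printf("%u\n", reset_count(2));			/* first reset done: 1 */
	printf("%u\n", reset_count(4 | I915_WEDGED));	/* wedged after two: 2 */
	return 0;
}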
+ 7 - 1
drivers/gpu/drm/i915/i915_gem.c

@@ -4465,7 +4465,13 @@ i915_gem_init_hw(struct drm_device *dev)
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
 	 */
-	i915_gem_context_init(dev);
+	ret = i915_gem_context_init(dev);
+	if (ret) {
+		i915_gem_cleanup_ringbuffer(dev);
+		DRM_ERROR("Context initialization failed %d\n", ret);
+		return ret;
+	}
+
 	if (dev_priv->mm.aliasing_ppgtt) {
 		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
 		if (ret) {

+ 20 - 25
drivers/gpu/drm/i915/i915_gem_context.c

@@ -247,36 +247,34 @@ err_destroy:
 	return ret;
 	return ret;
 }
 }
 
 
-void i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 
-	if (!HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_contexts_disabled = true;
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
-		return;
-	}
+	if (!HAS_HW_CONTEXTS(dev))
+		return 0;
 
 
 	/* If called from reset, or thaw... we've been here already */
 	/* If called from reset, or thaw... we've been here already */
-	if (dev_priv->hw_contexts_disabled ||
-	    dev_priv->ring[RCS].default_context)
-		return;
+	if (dev_priv->ring[RCS].default_context)
+		return 0;
 
 
 	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
 
 	if (dev_priv->hw_context_size > (1<<20)) {
 	if (dev_priv->hw_context_size > (1<<20)) {
-		dev_priv->hw_contexts_disabled = true;
 		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
 		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
-		return;
+		return -E2BIG;
 	}
 	}
 
 
-	if (create_default_context(dev_priv)) {
-		dev_priv->hw_contexts_disabled = true;
-		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
-		return;
+	ret = create_default_context(dev_priv);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
+				 ret);
+		return ret;
 	}
 	}
 
 
 	DRM_DEBUG_DRIVER("HW context support initialized\n");
 	DRM_DEBUG_DRIVER("HW context support initialized\n");
+	return 0;
 }
 }
 
 
 void i915_gem_context_fini(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
@@ -284,7 +282,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(dev))
 		return;
 		return;
 
 
 	/* The only known way to stop the gpu from accessing the hw context is
 	/* The only known way to stop the gpu from accessing the hw context is
@@ -327,16 +325,16 @@ i915_gem_context_get_hang_stats(struct drm_device *dev,
 				struct drm_file *file,
 				struct drm_file *file,
 				u32 id)
 				u32 id)
 {
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_hw_context *ctx;
 	struct i915_hw_context *ctx;
 
 
 	if (id == DEFAULT_CONTEXT_ID)
 	if (id == DEFAULT_CONTEXT_ID)
 		return &file_priv->hang_stats;
 		return &file_priv->hang_stats;
 
 
-	ctx = NULL;
-	if (!dev_priv->hw_contexts_disabled)
-		ctx = i915_gem_context_get(file->driver_priv, id);
+	if (!HAS_HW_CONTEXTS(dev))
+		return ERR_PTR(-ENOENT);
+
+	ctx = i915_gem_context_get(file->driver_priv, id);
 	if (ctx == NULL)
 	if (ctx == NULL)
 		return ERR_PTR(-ENOENT);
 		return ERR_PTR(-ENOENT);
 
 
@@ -494,8 +492,6 @@ static int do_switch(struct i915_hw_context *to)
  * @ring: ring for which we'll execute the context switch
  * @ring: ring for which we'll execute the context switch
  * @file_priv: file_priv associated with the context, may be NULL
  * @file_priv: file_priv associated with the context, may be NULL
  * @id: context id number
  * @id: context id number
- * @seqno: sequence number by which the new context will be switched to
- * @flags:
  *
  *
  * The context life cycle is simple. The context refcount is incremented and
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -509,7 +505,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct i915_hw_context *to;
 	struct i915_hw_context *to;
 
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(ring->dev))
 		return 0;
 		return 0;
 
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
@@ -534,7 +530,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file)
 				  struct drm_file *file)
 {
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_gem_context_create *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_hw_context *ctx;
 	struct i915_hw_context *ctx;
@@ -543,7 +538,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 		return -ENODEV;
 
 
-	if (dev_priv->hw_contexts_disabled)
+	if (!HAS_HW_CONTEXTS(dev))
 		return -ENODEV;
 		return -ENODEV;
 
 
 	ret = i915_mutex_lock_interruptible(dev);
 	ret = i915_mutex_lock_interruptible(dev);

+ 5 - 6
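i915_gem_context_get_hang_stats() now reports a missing context as ERR_PTR(-ENOENT) instead of relying on the hw_contexts_disabled flag. A stand-alone sketch of that pointer-encoded error idiom, simplified from the kernel's err.h:

#include <stdio.h>
#include <errno.h>

/* Simplified from include/linux/err.h: the last page of the address space is
 * reserved so a small negative errno can travel inside a pointer. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup_context(int found)
{
	static int ctx;

	return found ? (void *)&ctx : ERR_PTR(-ENOENT);
}

int main(void)
{
	void *p = lookup_context(0);

	if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));	/* -2 (ENOENT) */
	return 0;
}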
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -46,7 +46,7 @@ struct eb_vmas {
 };
 };
 
 
 static struct eb_vmas *
 static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
 {
 {
 	struct eb_vmas *eb = NULL;
 	struct eb_vmas *eb = NULL;
 
 
@@ -332,7 +332,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	target_i915_obj = target_vma->obj;
 	target_i915_obj = target_vma->obj;
 	target_obj = &target_vma->obj->base;
 	target_obj = &target_vma->obj->base;
 
 
-	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+	target_offset = target_vma->node.start;
 
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
 	 * pipe_control writes because the gpu doesn't properly redirect them
@@ -479,8 +479,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 }
 }
 
 
 static int
 static int
-i915_gem_execbuffer_relocate(struct eb_vmas *eb,
-			     struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 {
 {
 	struct i915_vma *vma;
 	struct i915_vma *vma;
 	int ret = 0;
 	int ret = 0;
@@ -1106,7 +1105,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 		goto pre_mutex_err;
 	}
 	}
 
 
-	eb = eb_create(args, vm);
+	eb = eb_create(args);
 	if (eb == NULL) {
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
 		ret = -ENOMEM;
@@ -1129,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 
 	/* The objects are in their final locations, apply the relocations. */
 	/* The objects are in their final locations, apply the relocations. */
 	if (need_relocs)
 	if (need_relocs)
-		ret = i915_gem_execbuffer_relocate(eb, vm);
+		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 	if (ret) {
 		if (ret == -EFAULT) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,

+ 22 - 4
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -240,10 +240,16 @@ static int gen8_ppgtt_enable(struct drm_device *dev)
 		for_each_ring(ring, dev_priv, j) {
 		for_each_ring(ring, dev_priv, j) {
 			ret = gen8_write_pdp(ring, i, addr);
 			ret = gen8_write_pdp(ring, i, addr);
 			if (ret)
 			if (ret)
-				return ret;
+				goto err_out;
 		}
 		}
 	}
 	}
 	return 0;
 	return 0;
+
+err_out:
+	for_each_ring(ring, dev_priv, j)
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+	return ret;
 }
 }
 
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@ -318,6 +324,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		container_of(vm, struct i915_hw_ppgtt, base);
 		container_of(vm, struct i915_hw_ppgtt, base);
 	int i, j;
 	int i, j;
 
 
+	drm_mm_takedown(&vm->mm);
+
 	for (i = 0; i < ppgtt->num_pd_pages ; i++) {
 	for (i = 0; i < ppgtt->num_pd_pages ; i++) {
 		if (ppgtt->pd_dma_addr[i]) {
 		if (ppgtt->pd_dma_addr[i]) {
 			pci_unmap_page(ppgtt->base.dev->pdev,
 			pci_unmap_page(ppgtt->base.dev->pdev,
@@ -337,8 +345,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		kfree(ppgtt->gen8_pt_dma_addr[i]);
 		kfree(ppgtt->gen8_pt_dma_addr[i]);
 	}
 	}
 
 
-	__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-	__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 }
 
 
 /**
 /**
@@ -381,6 +389,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+	ppgtt->base.start = 0;
+	ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE;
 
 
 	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
 
 
@@ -632,6 +642,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
+	ppgtt->base.start = 0;
+	ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
 	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
 	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
 				  GFP_KERNEL);
 				  GFP_KERNEL);
 	if (!ppgtt->pt_pages)
 	if (!ppgtt->pt_pages)
@@ -1126,7 +1138,6 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 		if (ret)
 		if (ret)
 			DRM_DEBUG_KMS("Reservation failed\n");
 			DRM_DEBUG_KMS("Reservation failed\n");
 		obj->has_global_gtt_mapping = 1;
 		obj->has_global_gtt_mapping = 1;
-		list_add(&vma->vma_link, &obj->vma_list);
 	}
 	}
 
 
 	dev_priv->gtt.base.start = start;
 	dev_priv->gtt.base.start = start;
@@ -1241,6 +1252,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 	if (bdw_gmch_ctl)
 	if (bdw_gmch_ctl)
 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+	if (bdw_gmch_ctl > 4) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 4<<20;
+	}
+
 	return bdw_gmch_ctl << 20;
 	return bdw_gmch_ctl << 20;
 }
 }
 
 
@@ -1397,6 +1413,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 {
 {
 
 
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+
+	drm_mm_takedown(&vm->mm);
 	iounmap(gtt->gsm);
 	iounmap(gtt->gsm);
 	teardown_scratch_page(vm->dev);
 	teardown_scratch_page(vm->dev);
 }
 }

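The gen8_ppgtt_cleanup() fix relies on __free_pages() taking an allocation order rather than a byte count; get_order() maps a size to the smallest order that covers it. A rough userspace approximation, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Rough equivalent of the kernel's get_order(): smallest n such that
 * 2^n pages hold at least 'size' bytes. */
static int get_order(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	/* 513 page-table pages -> order 10 (a 1024-page block is freed);
	 * passing the raw byte count as an "order", as the old code did,
	 * would be wildly out of range */
	printf("%d\n", get_order(513UL << PAGE_SHIFT));
	return 0;
}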
+ 11 - 6
drivers/gpu/drm/i915/i915_irq.c

@@ -600,7 +600,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	 * Cook up a vblank counter by also checking the pixel
 	 * Cook up a vblank counter by also checking the pixel
 	 * counter against vblank start.
 	 * counter against vblank start.
 	 */
 	 */
-	return ((high1 << 8) | low) + (pixel >= vbl_start);
+	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 }
 }
 
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -1015,10 +1015,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 * interrupt
 	 */
 	 */
-	if (new_delay < (int)dev_priv->rps.min_delay)
-		new_delay = dev_priv->rps.min_delay;
-	if (new_delay > (int)dev_priv->rps.max_delay)
-		new_delay = dev_priv->rps.max_delay;
+	new_delay = clamp_t(int, new_delay,
+			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
 
 	if (IS_VALLEYVIEW(dev_priv->dev))
 	if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1474,6 +1472,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
 
 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
 
 
+			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+				dp_aux_irq_handler(dev);
+
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
 			I915_READ(PORT_HOTPLUG_STAT);
 		}
 		}
@@ -1993,7 +1994,7 @@ static void i915_error_work_func(struct work_struct *work)
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
 		} else {
-			atomic_set(&error->reset_counter, I915_WEDGED);
+			atomic_set_mask(I915_WEDGED, &error->reset_counter);
 		}
 		}
 
 
 		/*
 		/*
@@ -3655,6 +3656,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 			intel_hpd_irq_handler(dev, hotplug_trigger,
 			intel_hpd_irq_handler(dev, hotplug_trigger,
 					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
 					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
 
 
+			if (IS_G4X(dev) &&
+			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
+				dp_aux_irq_handler(dev);
+
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
 			I915_READ(PORT_HOTPLUG_STAT);
 		}
 		}

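The i915_get_vblank_counter() change masks the software-cooked counter to 24 bits so it wraps like the hardware frame counter. A small sketch of the composition, assuming the 16-bit high / 8-bit low field split used by that function:

#include <stdio.h>
#include <stdint.h>

/* Frame counter composition: high and low register fields, +1 once the pixel
 * counter has passed vblank start, wrapped to the hardware counter's 24-bit
 * width (the part the fix adds). */
static uint32_t cook_vblank_counter(uint32_t high, uint32_t low,
				    uint32_t pixel, uint32_t vbl_start)
{
	return (((high << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

int main(void)
{
	/* at the top of the range the masked counter wraps to 0 instead of
	 * spilling into bit 24 */
	printf("0x%06x\n", (unsigned)cook_vblank_counter(0xffff, 0xff, 800, 768));
	return 0;
}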
+ 114 - 103
drivers/gpu/drm/i915/i915_reg.h

@@ -354,6 +354,7 @@
 #define   IOSF_BYTE_ENABLES_SHIFT		4
 #define   IOSF_BYTE_ENABLES_SHIFT		4
 #define   IOSF_BAR_SHIFT			1
 #define   IOSF_BAR_SHIFT			1
 #define   IOSF_SB_BUSY				(1<<0)
 #define   IOSF_SB_BUSY				(1<<0)
+#define   IOSF_PORT_BUNIT			0x3
 #define   IOSF_PORT_PUNIT			0x4
 #define   IOSF_PORT_PUNIT			0x4
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_DPIO			0x12
 #define   IOSF_PORT_DPIO			0x12
@@ -364,9 +365,17 @@
 #define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
 #define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
 
 
+/* See configdb bunit SB addr map */
+#define BUNIT_REG_BISOC				0x11
+
 #define PUNIT_OPCODE_REG_READ			6
 #define PUNIT_OPCODE_REG_READ			6
 #define PUNIT_OPCODE_REG_WRITE			7
 #define PUNIT_OPCODE_REG_WRITE			7
 
 
+#define PUNIT_REG_DSPFREQ			0x36
+#define   DSPFREQSTAT_SHIFT			30
+#define   DSPFREQSTAT_MASK			(0x3 << DSPFREQSTAT_SHIFT)
+#define   DSPFREQGUAR_SHIFT			14
+#define   DSPFREQGUAR_MASK			(0x3 << DSPFREQGUAR_SHIFT)
 #define PUNIT_REG_PWRGT_CTRL			0x60
 #define PUNIT_REG_PWRGT_CTRL			0x60
 #define PUNIT_REG_PWRGT_STATUS			0x61
 #define PUNIT_REG_PWRGT_STATUS			0x61
 #define	  PUNIT_CLK_GATE			1
 #define	  PUNIT_CLK_GATE			1
@@ -429,6 +438,7 @@
 #define  DSI_PLL_N1_DIV_MASK			(3 << 16)
 #define  DSI_PLL_N1_DIV_MASK			(3 << 16)
 #define  DSI_PLL_M1_DIV_SHIFT			0
 #define  DSI_PLL_M1_DIV_SHIFT			0
 #define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
 #define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
+#define CCK_DISPLAY_CLOCK_CONTROL		0x6b
 
 
 /*
 /*
  * DPIO - a special bus for various display related registers to hide behind
  * DPIO - a special bus for various display related registers to hide behind
@@ -447,15 +457,13 @@
 #define  DPIO_SFR_BYPASS		(1<<1)
 #define  DPIO_SFR_BYPASS		(1<<1)
 #define  DPIO_CMNRST			(1<<0)
 #define  DPIO_CMNRST			(1<<0)
 
 
-#define _DPIO_TX3_SWING_CTL4_A		0x690
-#define _DPIO_TX3_SWING_CTL4_B		0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
-					_DPIO_TX3_SWING_CTL4_B)
+#define DPIO_PHY(pipe)			((pipe) >> 1)
+#define DPIO_PHY_IOSF_PORT(phy)		(dev_priv->dpio_phy_iosf_port[phy])
 
 
 /*
 /*
  * Per pipe/PLL DPIO regs
  * Per pipe/PLL DPIO regs
  */
  */
-#define _DPIO_DIV_A			0x800c
+#define _VLV_PLL_DW3_CH0		0x800c
 #define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
 #define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
 #define   DPIO_POST_DIV_DAC		0
 #define   DPIO_POST_DIV_DAC		0
 #define   DPIO_POST_DIV_HDMIDP		1 /* DAC 225-400M rate */
 #define   DPIO_POST_DIV_HDMIDP		1 /* DAC 225-400M rate */
@@ -468,10 +476,10 @@
 #define   DPIO_ENABLE_CALIBRATION	(1<<11)
 #define   DPIO_ENABLE_CALIBRATION	(1<<11)
 #define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
 #define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
 #define   DPIO_M2DIV_MASK		0xff
 #define   DPIO_M2DIV_MASK		0xff
-#define _DPIO_DIV_B			0x802c
-#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+#define _VLV_PLL_DW3_CH1		0x802c
+#define VLV_PLL_DW3(ch) _PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)
 
 
-#define _DPIO_REFSFR_A			0x8014
+#define _VLV_PLL_DW5_CH0		0x8014
 #define   DPIO_REFSEL_OVERRIDE		27
 #define   DPIO_REFSEL_OVERRIDE		27
 #define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
 #define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
 #define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
 #define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
@@ -479,118 +487,112 @@
 #define   DPIO_PLL_REFCLK_SEL_MASK	3
 #define   DPIO_PLL_REFCLK_SEL_MASK	3
 #define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
 #define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
 #define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
 #define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
-#define _DPIO_REFSFR_B			0x8034
-#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+#define _VLV_PLL_DW5_CH1		0x8034
+#define VLV_PLL_DW5(ch) _PIPE(ch, _VLV_PLL_DW5_CH0, _VLV_PLL_DW5_CH1)
 
 
-#define _DPIO_CORE_CLK_A		0x801c
-#define _DPIO_CORE_CLK_B		0x803c
-#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+#define _VLV_PLL_DW7_CH0		0x801c
+#define _VLV_PLL_DW7_CH1		0x803c
+#define VLV_PLL_DW7(ch) _PIPE(ch, _VLV_PLL_DW7_CH0, _VLV_PLL_DW7_CH1)
 
 
-#define _DPIO_IREF_CTL_A		0x8040
-#define _DPIO_IREF_CTL_B		0x8060
-#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+#define _VLV_PLL_DW8_CH0		0x8040
+#define _VLV_PLL_DW8_CH1		0x8060
+#define VLV_PLL_DW8(ch) _PIPE(ch, _VLV_PLL_DW8_CH0, _VLV_PLL_DW8_CH1)
 
 
-#define DPIO_IREF_BCAST			0xc044
-#define _DPIO_IREF_A			0x8044
-#define _DPIO_IREF_B			0x8064
-#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+#define VLV_PLL_DW9_BCAST		0xc044
+#define _VLV_PLL_DW9_CH0		0x8044
+#define _VLV_PLL_DW9_CH1		0x8064
+#define VLV_PLL_DW9(ch) _PIPE(ch, _VLV_PLL_DW9_CH0, _VLV_PLL_DW9_CH1)
 
 
-#define _DPIO_PLL_CML_A			0x804c
-#define _DPIO_PLL_CML_B			0x806c
-#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+#define _VLV_PLL_DW10_CH0		0x8048
+#define _VLV_PLL_DW10_CH1		0x8068
+#define VLV_PLL_DW10(ch) _PIPE(ch, _VLV_PLL_DW10_CH0, _VLV_PLL_DW10_CH1)
 
 
-#define _DPIO_LPF_COEFF_A		0x8048
-#define _DPIO_LPF_COEFF_B		0x8068
-#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
+#define _VLV_PLL_DW11_CH0		0x804c
+#define _VLV_PLL_DW11_CH1		0x806c
+#define VLV_PLL_DW11(ch) _PIPE(ch, _VLV_PLL_DW11_CH0, _VLV_PLL_DW11_CH1)
 
 
-#define DPIO_CALIBRATION		0x80ac
+/* Spec for ref block start counts at DW10 */
+#define VLV_REF_DW13			0x80ac
 
 
-#define DPIO_FASTCLK_DISABLE		0x8100
+#define VLV_CMN_DW0			0x8100
 
 
 /*
 /*
  * Per DDI channel DPIO regs
  * Per DDI channel DPIO regs
  */
  */
 
 
-#define _DPIO_PCS_TX_0			0x8200
-#define _DPIO_PCS_TX_1			0x8400
+#define _VLV_PCS_DW0_CH0		0x8200
+#define _VLV_PCS_DW0_CH1		0x8400
 #define   DPIO_PCS_TX_LANE2_RESET	(1<<16)
 #define   DPIO_PCS_TX_LANE2_RESET	(1<<16)
 #define   DPIO_PCS_TX_LANE1_RESET	(1<<7)
 #define   DPIO_PCS_TX_LANE1_RESET	(1<<7)
-#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
 
-#define _DPIO_PCS_CLK_0			0x8204
-#define _DPIO_PCS_CLK_1			0x8404
+#define _VLV_PCS_DW1_CH0		0x8204
+#define _VLV_PCS_DW1_CH1		0x8404
 #define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN	(1<<22)
 #define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN	(1<<22)
 #define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
 #define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
 #define   DPIO_PCS_CLK_DATAWIDTH_SHIFT	(6)
 #define   DPIO_PCS_CLK_DATAWIDTH_SHIFT	(6)
 #define   DPIO_PCS_CLK_SOFT_RESET	(1<<5)
 #define   DPIO_PCS_CLK_SOFT_RESET	(1<<5)
-#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
-
-#define _DPIO_PCS_CTL_OVR1_A		0x8224
-#define _DPIO_PCS_CTL_OVR1_B		0x8424
-#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
-				       _DPIO_PCS_CTL_OVR1_B)
-
-#define _DPIO_PCS_STAGGER0_A		0x822c
-#define _DPIO_PCS_STAGGER0_B		0x842c
-#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
-				      _DPIO_PCS_STAGGER0_B)
-
-#define _DPIO_PCS_STAGGER1_A		0x8230
-#define _DPIO_PCS_STAGGER1_B		0x8430
-#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
-				      _DPIO_PCS_STAGGER1_B)
-
-#define _DPIO_PCS_CLOCKBUF0_A		0x8238
-#define _DPIO_PCS_CLOCKBUF0_B		0x8438
-#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
-				       _DPIO_PCS_CLOCKBUF0_B)
-
-#define _DPIO_PCS_CLOCKBUF8_A		0x825c
-#define _DPIO_PCS_CLOCKBUF8_B		0x845c
-#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
-				       _DPIO_PCS_CLOCKBUF8_B)
-
-#define _DPIO_TX_SWING_CTL2_A		0x8288
-#define _DPIO_TX_SWING_CTL2_B		0x8488
-#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
-				       _DPIO_TX_SWING_CTL2_B)
-
-#define _DPIO_TX_SWING_CTL3_A		0x828c
-#define _DPIO_TX_SWING_CTL3_B		0x848c
-#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
-				       _DPIO_TX_SWING_CTL3_B)
-
-#define _DPIO_TX_SWING_CTL4_A		0x8290
-#define _DPIO_TX_SWING_CTL4_B		0x8490
-#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
-				       _DPIO_TX_SWING_CTL4_B)
-
-#define _DPIO_TX_OCALINIT_0		0x8294
-#define _DPIO_TX_OCALINIT_1		0x8494
+#define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
+
+#define _VLV_PCS_DW8_CH0		0x8220
+#define _VLV_PCS_DW8_CH1		0x8420
+#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
+
+#define _VLV_PCS01_DW8_CH0		0x0220
+#define _VLV_PCS23_DW8_CH0		0x0420
+#define _VLV_PCS01_DW8_CH1		0x2620
+#define _VLV_PCS23_DW8_CH1		0x2820
+#define VLV_PCS01_DW8(port) _PORT(port, _VLV_PCS01_DW8_CH0, _VLV_PCS01_DW8_CH1)
+#define VLV_PCS23_DW8(port) _PORT(port, _VLV_PCS23_DW8_CH0, _VLV_PCS23_DW8_CH1)
+
+#define _VLV_PCS_DW9_CH0		0x8224
+#define _VLV_PCS_DW9_CH1		0x8424
+#define	VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
+
+#define _VLV_PCS_DW11_CH0		0x822c
+#define _VLV_PCS_DW11_CH1		0x842c
+#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
+
+#define _VLV_PCS_DW12_CH0		0x8230
+#define _VLV_PCS_DW12_CH1		0x8430
+#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
+
+#define _VLV_PCS_DW14_CH0		0x8238
+#define _VLV_PCS_DW14_CH1		0x8438
+#define	VLV_PCS_DW14(ch) _PORT(ch, _VLV_PCS_DW14_CH0, _VLV_PCS_DW14_CH1)
+
+#define _VLV_PCS_DW23_CH0		0x825c
+#define _VLV_PCS_DW23_CH1		0x845c
+#define VLV_PCS_DW23(ch) _PORT(ch, _VLV_PCS_DW23_CH0, _VLV_PCS_DW23_CH1)
+
+#define _VLV_TX_DW2_CH0			0x8288
+#define _VLV_TX_DW2_CH1			0x8488
+#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
+
+#define _VLV_TX_DW3_CH0			0x828c
+#define _VLV_TX_DW3_CH1			0x848c
+#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
+
+#define _VLV_TX_DW4_CH0			0x8290
+#define _VLV_TX_DW4_CH1			0x8490
+#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
+
+#define _VLV_TX3_DW4_CH0		0x690
+#define _VLV_TX3_DW4_CH1		0x2a90
+#define VLV_TX3_DW4(ch) _PORT(ch, _VLV_TX3_DW4_CH0, _VLV_TX3_DW4_CH1)
+
+#define _VLV_TX_DW5_CH0			0x8294
+#define _VLV_TX_DW5_CH1			0x8494
 #define   DPIO_TX_OCALINIT_EN		(1<<31)
 #define   DPIO_TX_OCALINIT_EN		(1<<31)
-#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
-				     _DPIO_TX_OCALINIT_1)
-
-#define _DPIO_TX_CTL_0			0x82ac
-#define _DPIO_TX_CTL_1			0x84ac
-#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
-
-#define _DPIO_TX_LANE_0			0x82b8
-#define _DPIO_TX_LANE_1			0x84b8
-#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
-
-#define _DPIO_DATA_CHANNEL1		0x8220
-#define _DPIO_DATA_CHANNEL2		0x8420
-#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
-
-#define _DPIO_PORT0_PCS0		0x0220
-#define _DPIO_PORT0_PCS1		0x0420
-#define _DPIO_PORT1_PCS2		0x2620
-#define _DPIO_PORT1_PCS3		0x2820
-#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
-#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
-#define DPIO_DATA_CHANNEL1              0x8220
-#define DPIO_DATA_CHANNEL2              0x8420
+#define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
+
+#define _VLV_TX_DW11_CH0		0x82ac
+#define _VLV_TX_DW11_CH1		0x84ac
+#define VLV_TX_DW11(ch) _PORT(ch, _VLV_TX_DW11_CH0, _VLV_TX_DW11_CH1)
+
+#define _VLV_TX_DW14_CH0		0x82b8
+#define _VLV_TX_DW14_CH1		0x84b8
+#define VLV_TX_DW14(ch) _PORT(ch, _VLV_TX_DW14_CH0, _VLV_TX_DW14_CH1)
 
 
 /*
 /*
  * Fence registers
  * Fence registers
@@ -2130,6 +2132,10 @@
 #define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
 #define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
 #define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
 #define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
 #define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
 #define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+#define   DP_AUX_CHANNEL_D_INT_STATUS_G4X	(1 << 6)
+#define   DP_AUX_CHANNEL_C_INT_STATUS_G4X	(1 << 5)
+#define   DP_AUX_CHANNEL_B_INT_STATUS_G4X	(1 << 4)
+#define   DP_AUX_CHANNEL_MASK_INT_STATUS_G4X	(7 << 4)
 /* SDVO is different across gen3/4 */
 /* SDVO is different across gen3/4 */
 #define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3)
 #define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3)
 #define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2)
 #define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2)
@@ -3787,7 +3793,7 @@
 
 
 #define _SPACNTR		(VLV_DISPLAY_BASE + 0x72180)
 #define _SPACNTR		(VLV_DISPLAY_BASE + 0x72180)
 #define   SP_ENABLE			(1<<31)
 #define   SP_ENABLE			(1<<31)
-#define   SP_GEAMMA_ENABLE		(1<<30)
+#define   SP_GAMMA_ENABLE		(1<<30)
 #define   SP_PIXFORMAT_MASK		(0xf<<26)
 #define   SP_PIXFORMAT_MASK		(0xf<<26)
 #define   SP_FORMAT_YUV422		(0<<26)
 #define   SP_FORMAT_YUV422		(0<<26)
 #define   SP_FORMAT_BGR565		(5<<26)
 #define   SP_FORMAT_BGR565		(5<<26)
@@ -4851,12 +4857,16 @@
 #define    FORCEWAKE_MT_ENABLE			(1<<5)
 #define    FORCEWAKE_MT_ENABLE			(1<<5)
 
 
 #define  GTFIFODBG				0x120000
 #define  GTFIFODBG				0x120000
-#define    GT_FIFO_CPU_ERROR_MASK		7
+#define    GT_FIFO_SBDROPERR			(1<<6)
+#define    GT_FIFO_BLOBDROPERR			(1<<5)
+#define    GT_FIFO_SB_READ_ABORTERR		(1<<4)
+#define    GT_FIFO_DROPERR			(1<<3)
 #define    GT_FIFO_OVFERR			(1<<2)
 #define    GT_FIFO_OVFERR			(1<<2)
 #define    GT_FIFO_IAWRERR			(1<<1)
 #define    GT_FIFO_IAWRERR			(1<<1)
 #define    GT_FIFO_IARDERR			(1<<0)
 #define    GT_FIFO_IARDERR			(1<<0)
 
 
-#define  GT_FIFO_FREE_ENTRIES			0x120008
+#define  GTFIFOCTL				0x120008
+#define    GT_FIFO_FREE_ENTRIES_MASK		0x7f
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
 #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
 
 
 #define  HSW_IDICR				0x9008
 #define  HSW_IDICR				0x9008
@@ -4890,6 +4900,7 @@
 #define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
 #define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
 #define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
 #define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
 #define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
 #define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   VLV_RC_CTL_CTX_RST_PARALLEL		(1<<24)
 #define   GEN7_RC_CTL_TO_MODE			(1<<28)
 #define   GEN7_RC_CTL_TO_MODE			(1<<28)
 #define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
 #define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
 #define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
 #define   GEN6_RC_CTL_HW_ENABLE			(1<<31)

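The renamed VLV DPIO defines keep using the driver's two-entry _PIPE()/_PORT() interpolation pattern. A stand-alone sketch, with the macro bodies assumed to match the ones in i915_reg.h:

#include <stdio.h>

/* Assumed to mirror the interpolating lookup in i915_reg.h. */
#define _PIPE(pipe, a, b)	((a) + (pipe)*((b)-(a)))
#define _PORT(port, a, b)	((a) + (port)*((b)-(a)))

#define _VLV_PLL_DW3_CH0	0x800c
#define _VLV_PLL_DW3_CH1	0x802c
#define VLV_PLL_DW3(ch)		_PIPE(ch, _VLV_PLL_DW3_CH0, _VLV_PLL_DW3_CH1)

int main(void)
{
	printf("ch0: 0x%04x\n", VLV_PLL_DW3(0));	/* 0x800c */
	printf("ch1: 0x%04x\n", VLV_PLL_DW3(1));	/* 0x802c */
	return 0;
}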
+ 0 - 45
drivers/gpu/drm/i915/i915_suspend.c

@@ -192,7 +192,6 @@ static void i915_restore_vga(struct drm_device *dev)
 static void i915_save_display(struct drm_device *dev)
 static void i915_save_display(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
 
 
 	/* Display arbitration control */
 	/* Display arbitration control */
 	if (INTEL_INFO(dev)->gen <= 4)
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -203,46 +202,27 @@ static void i915_save_display(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_save_display_reg(dev);
 		i915_save_display_reg(dev);
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
 	/* LVDS state */
 	/* LVDS state */
 	if (HAS_PCH_SPLIT(dev)) {
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
-		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
-		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
-		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
 			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
 	} else if (IS_VALLEYVIEW(dev)) {
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
 
 
-		dev_priv->regfile.saveBLC_PWM_CTL =
-			I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
 		dev_priv->regfile.saveBLC_HIST_CTL =
 		dev_priv->regfile.saveBLC_HIST_CTL =
 			I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
 			I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
-		dev_priv->regfile.saveBLC_PWM_CTL2 =
-			I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
-		dev_priv->regfile.saveBLC_PWM_CTL_B =
-			I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
 		dev_priv->regfile.saveBLC_HIST_CTL_B =
 		dev_priv->regfile.saveBLC_HIST_CTL_B =
 			I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
 			I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
-		dev_priv->regfile.saveBLC_PWM_CTL2_B =
-			I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
 	} else {
 	} else {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
 		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
 		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-		if (INTEL_INFO(dev)->gen >= 4)
-			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
 		if (IS_MOBILE(dev) && !IS_I830(dev))
 		if (IS_MOBILE(dev) && !IS_I830(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(LVDS);
 			dev_priv->regfile.saveLVDS = I915_READ(LVDS);
 	}
 	}
 
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
 
@@ -278,7 +258,6 @@ static void i915_restore_display(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 mask = 0xffffffff;
 	u32 mask = 0xffffffff;
-	unsigned long flags;
 
 
 	/* Display arbitration */
 	/* Display arbitration */
 	if (INTEL_INFO(dev)->gen <= 4)
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -287,12 +266,6 @@ static void i915_restore_display(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_restore_display_reg(dev);
 		i915_restore_display_reg(dev);
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-
-	/* LVDS state */
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		mask = ~LVDS_PORT_EN;
 		mask = ~LVDS_PORT_EN;
 
 
@@ -305,13 +278,6 @@ static void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 
 
 	if (HAS_PCH_SPLIT(dev)) {
 	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
-		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
-		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
-		 * otherwise we get blank eDP screen after S3 on some machines
-		 */
-		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
-		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
 		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -319,21 +285,12 @@ static void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(RSTDBYCTL,
 		I915_WRITE(RSTDBYCTL,
 			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
 			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
 	} else if (IS_VALLEYVIEW(dev)) {
 	} else if (IS_VALLEYVIEW(dev)) {
-		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
-			   dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
 		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
 			   dev_priv->regfile.saveBLC_HIST_CTL);
 			   dev_priv->regfile.saveBLC_HIST_CTL);
-		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
-			   dev_priv->regfile.saveBLC_PWM_CTL2);
-		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
-			   dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
 		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
 			   dev_priv->regfile.saveBLC_HIST_CTL);
 			   dev_priv->regfile.saveBLC_HIST_CTL);
-		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
-			   dev_priv->regfile.saveBLC_PWM_CTL2);
 	} else {
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
-		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
 		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
 		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
 		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
@@ -341,8 +298,6 @@ static void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 	}
 	}
 
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
 	/* only restore FBC info on the platform that supports FBC*/
 	/* only restore FBC info on the platform that supports FBC*/
 	intel_disable_fbc(dev);
 	intel_disable_fbc(dev);
 	if (I915_HAS_FBC(dev)) {
 	if (I915_HAS_FBC(dev)) {

+ 17 - 18
drivers/gpu/drm/i915/i915_sysfs.c

@@ -183,13 +183,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	int slice = (int)(uintptr_t)attr->private;
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 	int ret;
 
 
+	if (!HAS_HW_CONTEXTS(drm_dev))
+		return -ENXIO;
+
 	ret = l3_access_valid(drm_dev, offset);
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
-	if (dev_priv->hw_contexts_disabled)
-		return -ENXIO;
-
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -259,7 +259,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
 		u32 freq;
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
 	} else {
 	} else {
 		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
 		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
 	}
 	}
@@ -276,8 +276,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			vlv_gpu_freq(dev_priv->mem_freq,
-				     dev_priv->rps.rpe_delay));
+			vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
 }
 }
 
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -291,7 +290,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
 	else
 	else
 		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -318,7 +317,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	mutex_lock(&dev_priv->rps.hw_lock);
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 
 
 		hw_max = valleyview_rps_max_freq(dev_priv);
 		hw_max = valleyview_rps_max_freq(dev_priv);
 		hw_min = valleyview_rps_min_freq(dev_priv);
 		hw_min = valleyview_rps_min_freq(dev_priv);
@@ -342,15 +341,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		DRM_DEBUG("User requested overclocking to %d\n",
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  val * GT_FREQUENCY_MULTIPLIER);
 			  val * GT_FREQUENCY_MULTIPLIER);
 
 
+	dev_priv->rps.max_delay = val;
+
 	if (dev_priv->rps.cur_delay > val) {
 	if (dev_priv->rps.cur_delay > val) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, val);
+		if (IS_VALLEYVIEW(dev))
+			valleyview_set_rps(dev, val);
 		else
 		else
-			gen6_set_rps(dev_priv->dev, val);
+			gen6_set_rps(dev, val);
 	}
 	}
 
 
-	dev_priv->rps.max_delay = val;
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 
 	return count;
 	return count;
@@ -367,7 +366,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
 	else
 	else
 		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -394,7 +393,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	mutex_lock(&dev_priv->rps.hw_lock);
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 
 	if (IS_VALLEYVIEW(dev)) {
 	if (IS_VALLEYVIEW(dev)) {
-		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		val = vlv_freq_opcode(dev_priv, val);
 
 
 		hw_max = valleyview_rps_max_freq(dev_priv);
 		hw_max = valleyview_rps_max_freq(dev_priv);
 		hw_min = valleyview_rps_min_freq(dev_priv);
 		hw_min = valleyview_rps_min_freq(dev_priv);
@@ -411,15 +410,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
+	dev_priv->rps.min_delay = val;
+
 	if (dev_priv->rps.cur_delay < val) {
 	if (dev_priv->rps.cur_delay < val) {
 		if (IS_VALLEYVIEW(dev))
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 			valleyview_set_rps(dev, val);
 		else
 		else
-			gen6_set_rps(dev_priv->dev, val);
+			gen6_set_rps(dev, val);
 	}
 	}
 
 
-	dev_priv->rps.min_delay = val;
-
 	mutex_unlock(&dev_priv->rps.hw_lock);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 
 	return count;
 	return count;

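On non-VLV parts these sysfs files still convert between RPS steps and MHz with a fixed multiplier (50 MHz per step in this era of hardware), while VLV goes through vlv_gpu_freq()/vlv_freq_opcode(), which now take dev_priv so they can look up the memory frequency themselves. A trivial sketch of the fixed-step conversion:

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER	50	/* MHz per RPS step (SNB..HSW era) */

static int delay_to_mhz(int delay) { return delay * GT_FREQUENCY_MULTIPLIER; }
static int mhz_to_delay(int mhz)   { return mhz / GT_FREQUENCY_MULTIPLIER; }

int main(void)
{
	printf("%d MHz\n", delay_to_mhz(22));		/* 1100 MHz */
	printf("step %d\n", mhz_to_delay(1100));	/* 22 */
	return 0;
}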
+ 27 - 0
drivers/gpu/drm/i915/i915_ums.c

@@ -270,6 +270,18 @@ void i915_save_display_reg(struct drm_device *dev)
 	}
 	}
 	/* FIXME: regfile.save TV & SDVO state */
 	/* FIXME: regfile.save TV & SDVO state */
 
 
+	/* Backlight */
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+	} else {
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+		if (INTEL_INFO(dev)->gen >= 4)
+			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+	}
+
 	return;
 	return;
 }
 }
 
 
@@ -280,6 +292,21 @@ void i915_restore_display_reg(struct drm_device *dev)
 	int dpll_b_reg, fpb0_reg, fpb1_reg;
 	int dpll_b_reg, fpb0_reg, fpb1_reg;
 	int i;
 	int i;
 
 
+	/* Backlight */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+		 * otherwise we get blank eDP screen after S3 on some machines
+		 */
+		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+	} else {
+		if (INTEL_INFO(dev)->gen >= 4)
+			I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+	}
+
 	/* Display port ratios (must be done before clock is set) */
 	/* Display port ratios (must be done before clock is set) */
 	if (SUPPORTS_INTEGRATED_DP(dev)) {
 	if (SUPPORTS_INTEGRATED_DP(dev)) {
 		I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
 		I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);

+ 3 - 2
drivers/gpu/drm/i915/intel_crt.c

@@ -222,8 +222,9 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
 	intel_modeset_check_state(connector->dev);
 }
 
-static int intel_crt_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
 

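With mode_valid now returning enum drm_mode_status, a connector callback typically looks like the sketch below (kernel context and the usual DRM headers assumed; the clock limit is invented for the example):

/* Illustrative only: reject modes whose pixel clock exceeds a made-up limit. */
static enum drm_mode_status
example_mode_valid(struct drm_connector *connector,
		   struct drm_display_mode *mode)
{
	if (mode->clock > 400000)		/* mode->clock is in kHz */
		return MODE_CLOCK_HIGH;
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;
	return MODE_OK;
}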
+ 22 - 15
drivers/gpu/drm/i915/intel_ddi.c

@@ -713,8 +713,6 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
 	uint32_t reg, val;
 	uint32_t reg, val;
 	int clock = intel_crtc->config.port_clock;
 	int clock = intel_crtc->config.port_clock;
 
 
-	/* TODO: reuse PLLs when possible (compare values) */
-
 	intel_ddi_put_crtc_pll(crtc);
 	intel_ddi_put_crtc_pll(crtc);
 
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
@@ -742,31 +740,40 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
 	} else if (type == INTEL_OUTPUT_HDMI) {
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		unsigned p, n2, r2;
 		unsigned p, n2, r2;
 
 
-		if (plls->wrpll1_refcount == 0) {
+		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+		      WRPLL_DIVIDER_POST(p);
+
+		if (val == I915_READ(WRPLL_CTL1)) {
+			DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
+				      pipe_name(pipe));
+			reg = WRPLL_CTL1;
+		} else if (val == I915_READ(WRPLL_CTL2)) {
+			DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
+				      pipe_name(pipe));
+			reg = WRPLL_CTL2;
+		} else if (plls->wrpll1_refcount == 0) {
 			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
 			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
 				      pipe_name(pipe));
 				      pipe_name(pipe));
-			plls->wrpll1_refcount++;
 			reg = WRPLL_CTL1;
 			reg = WRPLL_CTL1;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
 		} else if (plls->wrpll2_refcount == 0) {
 		} else if (plls->wrpll2_refcount == 0) {
 			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
 			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
 				      pipe_name(pipe));
 				      pipe_name(pipe));
-			plls->wrpll2_refcount++;
 			reg = WRPLL_CTL2;
 			reg = WRPLL_CTL2;
-			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
 		} else {
 		} else {
 			DRM_ERROR("No WRPLLs available!\n");
 			DRM_ERROR("No WRPLLs available!\n");
 			return false;
 			return false;
 		}
 		}
 
 
-		WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
-		     "WRPLL already enabled\n");
-
-		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
-		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-		      WRPLL_DIVIDER_POST(p);
+		if (reg == WRPLL_CTL1) {
+			plls->wrpll1_refcount++;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+		} else {
+			plls->wrpll2_refcount++;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+		}
 
 
 	} else if (type == INTEL_OUTPUT_ANALOG) {
 	} else if (type == INTEL_OUTPUT_ANALOG) {
 		if (plls->spll_refcount == 0) {
 		if (plls->spll_refcount == 0) {

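The WRPLL change computes the desired control value first, reuses whichever PLL register already holds it, and only then falls back to a free PLL. The same compare-then-refcount pattern in miniature, with all names local to this sketch:

#include <stdio.h>
#include <stdint.h>

struct pll {
	uint32_t ctl;	/* last value written to the control register */
	int refcount;
};

/* Pick a PLL for the wanted control value: reuse an exact match first
 * (the value includes an enable bit, so only an active PLL can match),
 * otherwise take an unused one.  Returns -1 if nothing is available. */
static int pick_pll(struct pll *plls, int n, uint32_t want)
{
	int i;

	for (i = 0; i < n; i++)
		if (plls[i].ctl == want)
			return i;
	for (i = 0; i < n; i++)
		if (plls[i].refcount == 0)
			return i;
	return -1;
}

int main(void)
{
	struct pll plls[2] = { { 0x80000001u, 1 }, { 0, 0 } };
	int idx = pick_pll(plls, 2, 0x80000001u);

	if (idx >= 0) {
		plls[idx].refcount++;
		printf("using PLL %d, refcount %d\n", idx, plls[idx].refcount);
	}
	return 0;
}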
+ 256 - 63
drivers/gpu/drm/i915/intel_display.c

@@ -748,10 +748,10 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 	return intel_crtc->config.cpu_transcoder;
 	return intel_crtc->config.cpu_transcoder;
 }
 }
 
 
-static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 frame, frame_reg = PIPEFRAME(pipe);
+	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
 
 
 	frame = I915_READ(frame_reg);
 	frame = I915_READ(frame_reg);
 
 
@@ -772,8 +772,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipestat_reg = PIPESTAT(pipe);
 	int pipestat_reg = PIPESTAT(pipe);
 
 
-	if (INTEL_INFO(dev)->gen >= 5) {
-		ironlake_wait_for_vblank(dev, pipe);
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		g4x_wait_for_vblank(dev, pipe);
 		return;
 		return;
 	}
 	}
 
 
@@ -1361,6 +1361,7 @@ static void intel_init_dpio(struct drm_device *dev)
 	if (!IS_VALLEYVIEW(dev))
 	if (!IS_VALLEYVIEW(dev))
 		return;
 		return;
 
 
+	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
 	/*
 	/*
 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
@@ -1494,18 +1495,25 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	POSTING_READ(DPLL(pipe));
 	POSTING_READ(DPLL(pipe));
 }
 }
 
 
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+		struct intel_digital_port *dport)
 {
 {
 	u32 port_mask;
 	u32 port_mask;
 
 
-	if (!port)
+	switch (dport->port) {
+	case PORT_B:
 		port_mask = DPLL_PORTB_READY_MASK;
 		port_mask = DPLL_PORTB_READY_MASK;
-	else
+		break;
+	case PORT_C:
 		port_mask = DPLL_PORTC_READY_MASK;
 		port_mask = DPLL_PORTC_READY_MASK;
+		break;
+	default:
+		BUG();
+	}
 
 
 	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
 	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
 		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
 		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
-		     'B' + port, I915_READ(DPLL(0)));
+		     port_name(dport->port), I915_READ(DPLL(0)));
 }
 }
 
 
 /**
 /**
@@ -2233,7 +2241,12 @@ void intel_display_handle_reset(struct drm_device *dev)
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 
 		mutex_lock(&crtc->mutex);
 		mutex_lock(&crtc->mutex);
-		if (intel_crtc->active)
+		/*
+		 * FIXME: Once we have proper support for primary planes (and
+		 * disabling them without disabling the entire crtc) allow again
+		 * a NULL crtc->fb.
+		 */
+		if (intel_crtc->active && crtc->fb)
 			dev_priv->display.update_plane(crtc, crtc->fb,
 			dev_priv->display.update_plane(crtc, crtc->fb,
 						       crtc->x, crtc->y);
 						       crtc->x, crtc->y);
 		mutex_unlock(&crtc->mutex);
 		mutex_unlock(&crtc->mutex);
@@ -3910,6 +3923,174 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 }
 
 
+int valleyview_get_vco(struct drm_i915_private *dev_priv)
+{
+	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+	/* Obtain SKU information */
+	mutex_lock(&dev_priv->dpio_lock);
+	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+		CCK_FUSE_HPLL_FREQ_MASK;
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	return vco_freq[hpll_freq];
+}
+
+/* Adjust CDclk dividers to allow high res or save power if possible */
+static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, cmd;
+
+	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+		cmd = 2;
+	else if (cdclk == 266)
+		cmd = 1;
+	else
+		cmd = 0;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val &= ~DSPFREQGUAR_MASK;
+	val |= (cmd << DSPFREQGUAR_SHIFT);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+		     50)) {
+		DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (cdclk == 400) {
+		u32 divider, vco;
+
+		vco = valleyview_get_vco(dev_priv);
+		divider = ((vco << 1) / cdclk) - 1;
+
+		mutex_lock(&dev_priv->dpio_lock);
+		/* adjust cdclk divider */
+		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+		val &= ~0xf;
+		val |= divider;
+		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+		mutex_unlock(&dev_priv->dpio_lock);
+	}
+
+	mutex_lock(&dev_priv->dpio_lock);
+	/* adjust self-refresh exit latency value */
+	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+	val &= ~0x7f;
+
+	/*
+	 * For high bandwidth configs, we set a higher latency in the bunit
+	 * so that the core display fetch happens in time to avoid underruns.
+	 */
+	if (cdclk == 400)
+		val |= 4500 / 250; /* 4.5 usec */
+	else
+		val |= 3000 / 250; /* 3.0 usec */
+	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
+	intel_i2c_reset(dev);
+}
+
+static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
+{
+	int cur_cdclk, vco;
+	int divider;
+
+	vco = valleyview_get_vco(dev_priv);
+
+	mutex_lock(&dev_priv->dpio_lock);
+	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	divider &= 0xf;
+
+	cur_cdclk = (vco << 1) / (divider + 1);
+
+	return cur_cdclk;
+}
+
+static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
+				 int max_pixclk)
+{
+	int cur_cdclk;
+
+	cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+	/*
+	 * Really only a few cases to deal with, as only 4 CDclks are supported:
+	 *   200MHz
+	 *   267MHz
+	 *   320MHz
+	 *   400MHz
+	 * So we check to see whether we're above 90% of the lower bin and
+	 * adjust if needed.
+	 */
+	if (max_pixclk > 288000) {
+		return 400;
+	} else if (max_pixclk > 240000) {
+		return 320;
+	} else
+		return 266;
+	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
+}
+
+static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
+				 unsigned modeset_pipes,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *intel_crtc;
+	int max_pixclk = 0;
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		if (modeset_pipes & (1 << intel_crtc->pipe))
+			max_pixclk = max(max_pixclk,
+					 pipe_config->adjusted_mode.crtc_clock);
+		else if (intel_crtc->base.enabled)
+			max_pixclk = max(max_pixclk,
+					 intel_crtc->config.adjusted_mode.crtc_clock);
+	}
+
+	return max_pixclk;
+}
+
+static void valleyview_modeset_global_pipes(struct drm_device *dev,
+					    unsigned *prepare_pipes,
+					    unsigned modeset_pipes,
+					    struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
+					       pipe_config);
+	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+
+	if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
+		return;
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head)
+		if (intel_crtc->base.enabled)
+			*prepare_pipes |= (1 << intel_crtc->pipe);
+}
+
+static void valleyview_modeset_global_resources(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
+	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+	if (req_cdclk != cur_cdclk)
+		valleyview_set_cdclk(dev, req_cdclk);
+}
+
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
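
A small standalone sketch of the CDclk bin selection and divider arithmetic introduced above, assuming the 800 MHz VCO fuse value; the mode and printed numbers are only illustrative:

#include <stdio.h>

/* Mirror of the bin selection above: anything above ~90% of a bin's
 * pixel rate bumps the request into the next CDclk bin. */
static int calc_cdclk(int max_pixclk_khz)
{
	if (max_pixclk_khz > 288000)
		return 400;
	else if (max_pixclk_khz > 240000)
		return 320;
	else
		return 266;
}

int main(void)
{
	int vco = 800;		/* MHz, assuming the 800 MHz fuse value */
	int pixclk = 297000;	/* kHz, an illustrative high-res mode */
	int cdclk = calc_cdclk(pixclk);

	printf("pixclk %d kHz -> CDclk %d MHz\n", pixclk, cdclk);
	if (cdclk == 400)
		/* same arithmetic as the CCK divider programming above */
		printf("divider = %d\n", ((vco << 1) / cdclk) - 1);	/* prints 3 */
	return 0;
}
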
@@ -4634,24 +4815,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
 	 * and set it to a reasonable value instead.
 	 */
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
 	reg_val &= 0xffffff00;
 	reg_val |= 0x00000030;
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
 	reg_val &= 0x8cffffff;
 	reg_val = 0x8c000000;
-	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
 	reg_val &= 0xffffff00;
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
 	reg_val &= 0x00ffffff;
 	reg_val |= 0xb0000000;
-	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
 }
 
 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4720,15 +4901,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 		vlv_pllb_recal_opamp(dev_priv, pipe);
 
 	/* Set up Tx target for periodic Rcomp update */
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
 
 	/* Disable target IRef on PLL */
-	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
+	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
 	reg_val &= 0x00ffffff;
-	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
 
 	/* Disable fast lock */
-	vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
+	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
 
 	/* Set idtafcrecal before PLL is enabled */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
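 
The DPIO programming above repeatedly reads a register, masks bits and writes the value back. A toy sketch of that read-modify-write pattern; the register file, dpio_rmw() and the numbers are invented for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[64];	/* toy register file standing in for DPIO */

static uint32_t dpio_read(unsigned int reg) { return regs[reg]; }
static void dpio_write(unsigned int reg, uint32_t val) { regs[reg] = val; }

/* Clear 'clear' bits, then set 'set' bits - the sequence used above. */
static void dpio_rmw(unsigned int reg, uint32_t clear, uint32_t set)
{
	uint32_t val = dpio_read(reg);

	val &= ~clear;
	val |= set;
	dpio_write(reg, val);
}

int main(void)
{
	regs[9] = 0x12345678;
	dpio_rmw(9, 0xff, 0x30);		/* keep high bits, force 0x30 low */
	printf("0x%08x\n", dpio_read(9));	/* prints 0x12345630 */
	return 0;
}
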
@@ -4742,48 +4923,48 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 	 * Note: don't use the DAC post divider as it seems unstable.
 	 */
 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
 	mdiv |= DPIO_ENABLE_CALIBRATION;
-	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
 	/* Set HBR and RBR LPF coefficients */
 	if (crtc->config.port_clock == 162000 ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x009f0003);
 	else
-		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x00d0000f);
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
 		/* Use SSC source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df40000);
 		else
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df70000);
 	} else { /* HDMI or VGA */
 		/* Use bend source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df70000);
 		else
-			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
 					 0x0df40000);
 	}
 
-	coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
+	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
 		coreclk |= 0x01000000;
-	vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
-	vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
 
 	/* Enable DPIO clock input */
 	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
@@ -5261,7 +5442,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
 	int refclk = 100000;
 
 	mutex_lock(&dev_priv->dpio_lock);
-	mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
@@ -6402,7 +6583,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 
 
 	/* Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine! */
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6617,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -9402,6 +9583,21 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 				       "[modeset]");
 	}
 
+	/*
+	 * See if the config requires any additional preparation, e.g.
+	 * to adjust global state with pipes off.  We need to do this
+	 * here so we can get the modeset_pipe updated config for the new
+	 * mode set on this crtc.  For other crtcs we need to use the
+	 * adjusted_mode bits in the crtc directly.
+	 */
+	if (IS_VALLEYVIEW(dev)) {
+		valleyview_modeset_global_pipes(dev, &prepare_pipes,
+						modeset_pipes, pipe_config);
+
+		/* may have added more to prepare_pipes than we should */
+		prepare_pipes &= ~disable_pipes;
+	}
+
 	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
 		intel_crtc_disable(&intel_crtc->base);
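
A tiny sketch of the pipe-mask bookkeeping done by valleyview_modeset_global_pipes() and the prepare_pipes &= ~disable_pipes step above; the pipe assignments are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned modeset_pipes = 1u << 0;		/* pipe A is being set */
	unsigned enabled_pipes = (1u << 0) | (1u << 1);	/* A and B running */
	unsigned disable_pipes = 1u << 1;		/* B is going away */
	unsigned prepare_pipes = modeset_pipes;

	/* a CDclk change touches every running pipe, so pull them all in... */
	prepare_pipes |= enabled_pipes;
	/* ...but never prepare a pipe that is about to be disabled */
	prepare_pipes &= ~disable_pipes;

	printf("prepare_pipes = 0x%x\n", prepare_pipes);	/* prints 0x1 */
	return 0;
}
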
 
 
@@ -10412,8 +10608,11 @@ static void intel_init_display(struct drm_device *dev)
 		}
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
-	} else if (IS_VALLEYVIEW(dev))
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.modeset_global_resources =
+			valleyview_modeset_global_resources;
 		dev_priv->display.write_eld = ironlake_write_eld;
+	}
 
 	/* Default just returns -ENODEV to indicate unsupported */
 	dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -10440,6 +10639,8 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
 		break;
 	}
+
+	intel_panel_init_backlight_funcs(dev);
 }
 
 /*
@@ -10476,17 +10677,6 @@ static void quirk_invert_brightness(struct drm_device *dev)
 	DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
-/*
- * Some machines (Dell XPS13) suffer broken backlight controls if
- * BLM_PCH_PWM_ENABLE is set.
- */
-static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
-	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
-}
-
 struct intel_quirk {
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -10546,11 +10736,6 @@ static struct intel_quirk intel_quirks[] = {
 	 * seem to use inverted backlight PWM.
 	 */
 	{ 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
-
-	/* Dell XPS13 HD Sandy Bridge */
-	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
-	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
-	{ 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
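
The quirk table above matches on the device ID plus subsystem IDs, with PCI_ANY_ID acting as a wildcard. A sketch of how such a table is typically walked; the matching helper here is an assumption for illustration, not code taken from this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ANY_ID 0xffff	/* stand-in for PCI_ANY_ID */

struct quirk {
	uint16_t device, subsystem_vendor, subsystem_device;
	const char *name;
};

static const struct quirk quirks[] = {
	{ 0x2a42, 0x1025, ANY_ID, "invert backlight brightness" },
};

static bool quirk_matches(const struct quirk *q, uint16_t dev,
			  uint16_t sv, uint16_t sd)
{
	return q->device == dev &&
	       (q->subsystem_vendor == sv || q->subsystem_vendor == ANY_ID) &&
	       (q->subsystem_device == sd || q->subsystem_device == ANY_ID);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirk_matches(&quirks[i], 0x2a42, 0x1025, 0x1234))
			printf("applying quirk: %s\n", quirks[i].name);
	return 0;
}
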
@@ -10870,7 +11055,7 @@ void i915_redisable_vga(struct drm_device *dev)
 	 * level, just check if the power well is enabled instead of trying to
 	 * follow the "don't touch the power well if we don't need it" policy
 	 * the rest of the driver uses. */
-	if (HAS_POWER_WELL(dev) &&
+	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
 	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
 		return;
 
@@ -11091,12 +11276,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
-	/* destroy backlight, if any, before the connectors */
-	intel_panel_destroy_backlight(dev);
-
-	/* destroy the sysfs files before encoders/connectors */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	/* destroy the backlight and sysfs files before encoders/connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		intel_panel_destroy_backlight(connector);
 		drm_sysfs_connector_remove(connector);
+	}
 
 	drm_mode_config_cleanup(dev);
 
@@ -11150,6 +11334,7 @@ struct intel_display_error_state {
 	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
+		bool power_domain_on;
 		u32 source;
 	} pipe[I915_MAX_PIPES];
 
@@ -11164,6 +11349,7 @@ struct intel_display_error_state {
 	} plane[I915_MAX_PIPES];
 
 	struct intel_transcoder_error_state {
+		bool power_domain_on;
 		enum transcoder cpu_transcoder;
 
 		u32 conf;
@@ -11197,11 +11383,13 @@ intel_display_capture_error_state(struct drm_device *dev)
 	if (error == NULL)
 		return NULL;
 
-	if (HAS_POWER_WELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
-		if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+		error->pipe[i].power_domain_on =
+			intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+		if (!error->pipe[i].power_domain_on)
 			continue;
 
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
@@ -11237,8 +11425,9 @@ intel_display_capture_error_state(struct drm_device *dev)
 	for (i = 0; i < error->num_transcoders; i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
-		if (!intel_display_power_enabled(dev,
-				POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+		error->transcoder[i].power_domain_on =
+			intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
+		if (!error->transcoder[i].power_domain_on)
 			continue;
 
 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
@@ -11268,11 +11457,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 		return;
 
 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
-	if (HAS_POWER_WELL(dev))
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
 			   error->power_well_driver);
 	for_each_pipe(i) {
 		err_printf(m, "Pipe [%d]:\n", i);
+		err_printf(m, "  Power: %s\n",
+			   error->pipe[i].power_domain_on ? "on" : "off");
 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
 
 		err_printf(m, "Plane [%d]:\n", i);
@@ -11298,6 +11489,8 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 	for (i = 0; i < error->num_transcoders; i++) {
 		err_printf(m, "CPU transcoder: %c\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
+		err_printf(m, "  Power: %s\n",
+			   error->transcoder[i].power_domain_on ? "on" : "off");
 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
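
A compact sketch of the capture-then-print pattern the error-state changes above introduce: record whether the power domain was on, skip register reads when it was not, and report the recorded state later. The stubs and register values are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pipe_error_state {
	bool power_domain_on;
	uint32_t source;
};

static bool power_enabled(int pipe) { return pipe == 0; }	/* stub */

static uint32_t read_pipesrc(int pipe)
{
	(void)pipe;		/* stub: same fake PIPESRC for every pipe */
	return 0x04ff02ff;
}

int main(void)
{
	struct pipe_error_state err[3] = { { false, 0 } };
	int pipe;

	for (pipe = 0; pipe < 3; pipe++) {
		/* record the power state, then only read registers whose
		 * power well is actually up */
		err[pipe].power_domain_on = power_enabled(pipe);
		if (!err[pipe].power_domain_on)
			continue;
		err[pipe].source = read_pipesrc(pipe);
	}

	for (pipe = 0; pipe < 3; pipe++)
		printf("Pipe [%d]:  Power: %s  SRC: %08x\n", pipe,
		       err[pipe].power_domain_on ? "on" : "off",
		       err[pipe].source);
	return 0;
}
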

+ 22 - 22
drivers/gpu/drm/i915/intel_dp.c

@@ -142,7 +142,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 	return (max_link_clock * max_lanes * 8) / 10;
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 }
 
 
-static int
+static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 		    struct drm_display_mode *mode)
 {
 {
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	int i, ret, recv_bytes;
 	int i, ret, recv_bytes;
 	uint32_t status;
 	uint32_t status;
 	int try, precharge, clock = 0;
 	int try, precharge, clock = 0;
-	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+	bool has_aux_irq = true;
 	uint32_t timeout;
 	uint32_t timeout;
 
 
 	/* dp aux is extremely sensitive to irq latency, hence request the
 	/* dp aux is extremely sensitive to irq latency, hence request the
@@ -1845,23 +1845,23 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int pipe = intel_crtc->pipe;
 	struct edp_power_seq power_seq;
 	struct edp_power_seq power_seq;
 	u32 val;
 	u32 val;
 
 
 	mutex_lock(&dev_priv->dpio_lock);
 	mutex_lock(&dev_priv->dpio_lock);
 
 
-	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
 	val = 0;
 	val = 0;
 	if (pipe)
 	if (pipe)
 		val |= (1<<21);
 		val |= (1<<21);
 	else
 	else
 		val &= ~(1<<21);
 		val &= ~(1<<21);
 	val |= 0x001000c4;
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 
 
 	mutex_unlock(&dev_priv->dpio_lock);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 
@@ -1872,7 +1872,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 
 
 	intel_enable_dp(encoder);
 	intel_enable_dp(encoder);
 
 
-	vlv_wait_port_ready(dev_priv, port);
+	vlv_wait_port_ready(dev_priv, dport);
 }
 }
 
 
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1882,24 +1882,24 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int pipe = intel_crtc->pipe;
 
 
 	/* Program Tx lane resets to default */
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 				 DPIO_PCS_CLK_SOFT_RESET);
 				 DPIO_PCS_CLK_SOFT_RESET);
 
 
 	/* Fix up inter-pair skew failure */
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
 	mutex_unlock(&dev_priv->dpio_lock);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 }
 
 
@@ -2050,7 +2050,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
 	unsigned long demph_reg_value, preemph_reg_value,
 	unsigned long demph_reg_value, preemph_reg_value,
 		uniqtranscale_reg_value;
 		uniqtranscale_reg_value;
 	uint8_t train_set = intel_dp->train_set[0];
 	uint8_t train_set = intel_dp->train_set[0];
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int pipe = intel_crtc->pipe;
 
 
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
@@ -2127,14 +2127,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
 	}
 	}
 
 
 	mutex_lock(&dev_priv->dpio_lock);
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
 			 uniqtranscale_reg_value);
 			 uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
 	mutex_unlock(&dev_priv->dpio_lock);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 
 	return 0;
 	return 0;

+ 20 - 5
drivers/gpu/drm/i915/intel_drv.h

@@ -156,6 +156,17 @@ struct intel_encoder {
 struct intel_panel {
 struct intel_panel {
 	struct drm_display_mode *fixed_mode;
 	struct drm_display_mode *fixed_mode;
 	int fitting_mode;
 	int fitting_mode;
+
+	/* backlight */
+	struct {
+		bool present;
+		u32 level;
+		u32 max;
+		bool enabled;
+		bool combination_mode;	/* gen 2/4 only */
+		bool active_low_pwm;
+		struct backlight_device *device;
+	} backlight;
 };
 };
 
 
 struct intel_connector {
 struct intel_connector {
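
The new per-connector backlight struct above carries a hardware maximum that user-visible levels get scaled against. A standalone sketch of the overflow-safe scaling used for that; the names and numbers are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>

/* Scale a user level in [0..user_max] to hardware PWM units in [0..hw_max]
 * without overflowing 32-bit math; the operand ordering mirrors what
 * intel_panel_set_backlight() does with panel->backlight.max. */
static uint32_t scale_backlight(uint32_t level, uint32_t user_max,
				uint32_t hw_max)
{
	if (hw_max < user_max)
		return level * hw_max / user_max;
	else
		return hw_max / user_max * level;
}

int main(void)
{
	printf("%u\n", scale_backlight(128, 255, 7812));	/* prints 3840 */
	return 0;
}
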
@@ -490,9 +501,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
 {
 {
 	switch (dport->port) {
 	switch (dport->port) {
 	case PORT_B:
 	case PORT_B:
-		return 0;
+		return DPIO_CH0;
 	case PORT_C:
 	case PORT_C:
-		return 1;
+		return DPIO_CH1;
 	default:
 	default:
 		BUG();
 		BUG();
 	}
 	}
@@ -638,7 +649,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+			 struct intel_digital_port *dport);
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *mode,
 				struct intel_load_detect_pipe *old);
 				struct intel_load_detect_pipe *old);
@@ -694,7 +706,7 @@ void i915_disable_vga_mem(struct drm_device *dev);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 void intel_display_set_init_power(struct drm_device *dev, bool enable);
 void intel_display_set_init_power(struct drm_device *dev, bool enable);
-
+int valleyview_get_vco(struct drm_i915_private *dev_priv);
 
 
 /* intel_dp.c */
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -808,7 +820,8 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 int intel_panel_setup_backlight(struct drm_connector *connector);
 int intel_panel_setup_backlight(struct drm_connector *connector);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
-void intel_panel_destroy_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_connector *connector);
+void intel_panel_init_backlight_funcs(struct drm_device *dev);
 enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
 
 
 
@@ -829,6 +842,8 @@ int intel_power_domains_init(struct drm_device *dev);
 void intel_power_domains_remove(struct drm_device *dev);
 void intel_power_domains_remove(struct drm_device *dev);
 bool intel_display_power_enabled(struct drm_device *dev,
 bool intel_display_power_enabled(struct drm_device *dev,
 				 enum intel_display_power_domain domain);
 				 enum intel_display_power_domain domain);
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_device *dev,
 void intel_display_power_get(struct drm_device *dev,
 			     enum intel_display_power_domain domain);
 			     enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_device *dev,
 void intel_display_power_put(struct drm_device *dev,

+ 3 - 2
drivers/gpu/drm/i915/intel_dsi.c

@@ -251,8 +251,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 	/* XXX: read flags, set to adjusted_mode */
 	/* XXX: read flags, set to adjusted_mode */
 }
 }
 
 
-static int intel_dsi_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dsi_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
 {
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

+ 3 - 2
drivers/gpu/drm/i915/intel_dvo.c

@@ -234,8 +234,9 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
 	intel_modeset_check_state(connector->dev);
 	intel_modeset_check_state(connector->dev);
 }
 }
 
 
-static int intel_dvo_mode_valid(struct drm_connector *connector,
-				struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
 {
 {
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 
 

+ 47 - 16
drivers/gpu/drm/i915/intel_fbdev.c

@@ -57,18 +57,14 @@ static struct fb_ops intelfb_ops = {
 	.fb_debug_leave = drm_fb_helper_debug_leave,
 	.fb_debug_leave = drm_fb_helper_debug_leave,
 };
 };
 
 
-static int intelfb_create(struct drm_fb_helper *helper,
-			  struct drm_fb_helper_surface_size *sizes)
+static int intelfb_alloc(struct drm_fb_helper *helper,
+			 struct drm_fb_helper_surface_size *sizes)
 {
 {
 	struct intel_fbdev *ifbdev =
 	struct intel_fbdev *ifbdev =
 		container_of(helper, struct intel_fbdev, helper);
 		container_of(helper, struct intel_fbdev, helper);
 	struct drm_device *dev = helper->dev;
 	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct fb_info *info;
-	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_gem_object *obj;
-	struct device *device = &dev->pdev->dev;
 	int size, ret;
 	int size, ret;
 
 
 	/* we don't do packed 24bpp */
 	/* we don't do packed 24bpp */
@@ -94,8 +90,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		goto out;
 		goto out;
 	}
 	}
 
 
-	mutex_lock(&dev->struct_mutex);
-
 	/* Flush everything out, we'll be doing GTT only from now on */
 	/* Flush everything out, we'll be doing GTT only from now on */
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	if (ret) {
 	if (ret) {
@@ -103,7 +97,50 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		goto out_unref;
 		goto out_unref;
 	}
 	}
 
 
-	info = framebuffer_alloc(0, device);
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+	if (ret)
+		goto out_unpin;
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin(obj);
+out_unref:
+	drm_gem_object_unreference(&obj->base);
+out:
+	return ret;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+			  struct drm_fb_helper_surface_size *sizes)
+{
+	struct intel_fbdev *ifbdev =
+		container_of(helper, struct intel_fbdev, helper);
+	struct intel_framebuffer *intel_fb = &ifbdev->ifb;
+	struct drm_device *dev = helper->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_i915_gem_object *obj;
+	int size, ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (!intel_fb->obj) {
+		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+		ret = intelfb_alloc(helper, sizes);
+		if (ret)
+			goto out_unlock;
+	} else {
+		DRM_DEBUG_KMS("re-using BIOS fb\n");
+		sizes->fb_width = intel_fb->base.width;
+		sizes->fb_height = intel_fb->base.height;
+	}
+
+	obj = intel_fb->obj;
+	size = obj->base.size;
+
+	info = framebuffer_alloc(0, &dev->pdev->dev);
 	if (!info) {
 	if (!info) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
 		goto out_unpin;
 		goto out_unpin;
@@ -111,10 +148,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 
 	info->par = helper;
 	info->par = helper;
 
 
-	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
-	if (ret)
-		goto out_unpin;
-
 	fb = &ifbdev->ifb.base;
 	fb = &ifbdev->ifb.base;
 
 
 	ifbdev->helper.fb = fb;
 	ifbdev->helper.fb = fb;
@@ -170,17 +203,15 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		      fb->width, fb->height,
 		      fb->width, fb->height,
 		      i915_gem_obj_ggtt_offset(obj), obj);
 		      i915_gem_obj_ggtt_offset(obj), obj);
 
 
-
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 	return 0;
 
 
 out_unpin:
 out_unpin:
 	i915_gem_object_unpin(obj);
 	i915_gem_object_unpin(obj);
-out_unref:
 	drm_gem_object_unreference(&obj->base);
 	drm_gem_object_unreference(&obj->base);
+out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
-out:
 	return ret;
 	return ret;
 }
 }
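
A minimal sketch of the allocate-or-reuse split that intelfb_create()/intelfb_alloc() implement above: keep an existing framebuffer when one is already there, otherwise allocate a new one, and size the console accordingly. All names and sizes here are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct fake_fb {
	int width, height;
	bool present;
};

/* Keep an existing (e.g. firmware-handed-over) framebuffer when present,
 * otherwise allocate; the console adopts whichever size ends up in use. */
static void fbdev_create(struct fake_fb *fb, int want_w, int want_h)
{
	if (!fb->present) {
		printf("no BIOS fb, allocating a new one\n");
		fb->width = want_w;
		fb->height = want_h;
		fb->present = true;
	} else {
		printf("re-using BIOS fb\n");
		want_w = fb->width;
		want_h = fb->height;
	}
	printf("console size %dx%d\n", want_w, want_h);
}

int main(void)
{
	struct fake_fb bios_fb = { 1920, 1080, true };
	struct fake_fb none = { 0, 0, false };

	fbdev_create(&bios_fb, 1024, 768);
	fbdev_create(&none, 1024, 768);
	return 0;
}
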
 
 

+ 29 - 38
drivers/gpu/drm/i915/intel_hdmi.c

@@ -853,8 +853,9 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
 		return 225000;
 		return 225000;
 }
 }
 
 
-static int intel_hdmi_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
 {
 {
 	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
 	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
 		return MODE_CLOCK_HIGH;
 		return MODE_CLOCK_HIGH;
@@ -1081,7 +1082,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int pipe = intel_crtc->pipe;
 	u32 val;
 	u32 val;
 
 
@@ -1090,41 +1091,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 
 	/* Enable clock channels for this port */
 	/* Enable clock channels for this port */
 	mutex_lock(&dev_priv->dpio_lock);
 	mutex_lock(&dev_priv->dpio_lock);
-	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
 	val = 0;
 	val = 0;
 	if (pipe)
 	if (pipe)
 		val |= (1<<21);
 		val |= (1<<21);
 	else
 	else
 		val &= ~(1<<21);
 		val &= ~(1<<21);
 	val |= 0x001000c4;
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
 
 
 	/* HDMI 1.0V-2dB */
 	/* HDMI 1.0V-2dB */
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
-			 0x2b245f5f);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
-			 0x5578b83a);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
-			 0x0c782040);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
-			 0x2b247878);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
-			 0x00002000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
-			 DPIO_TX_OCALINIT_EN);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
 
 
 	/* Program lane clock */
 	/* Program lane clock */
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
-			 0x00760018);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
-			 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 
 	intel_enable_hdmi(encoder);
 	intel_enable_hdmi(encoder);
 
 
-	vlv_wait_port_ready(dev_priv, port);
+	vlv_wait_port_ready(dev_priv, dport);
 }
 }
 
 
 static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1134,7 +1127,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int pipe = intel_crtc->pipe;
 
 
 	if (!IS_VALLEYVIEW(dev))
 	if (!IS_VALLEYVIEW(dev))
@@ -1142,24 +1135,22 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 
 
 	/* Program Tx lane resets to default */
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 			 DPIO_PCS_CLK_SOFT_RESET);
 			 DPIO_PCS_CLK_SOFT_RESET);
 
 
 	/* Fix up inter-pair skew failure */
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
-
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
-			 0x00002000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
-			 DPIO_TX_OCALINIT_EN);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
 	mutex_unlock(&dev_priv->dpio_lock);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 }
 
 
@@ -1169,13 +1160,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_crtc *intel_crtc =
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
 		to_intel_crtc(encoder->base.crtc);
-	int port = vlv_dport_to_channel(dport);
+	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int pipe = intel_crtc->pipe;
 
 
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	mutex_lock(&dev_priv->dpio_lock);
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
-	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
 	mutex_unlock(&dev_priv->dpio_lock);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 }
 
 

+ 3 - 12
drivers/gpu/drm/i915/intel_i2c.c

@@ -82,20 +82,11 @@ static int get_disp_clk_div(struct drm_i915_private *dev_priv,
 
 
 static void gmbus_set_freq(struct drm_i915_private *dev_priv)
 static void gmbus_set_freq(struct drm_i915_private *dev_priv)
 {
 {
-	int vco_freq[] = { 800, 1600, 2000, 2400 };
-	int gmbus_freq = 0, cdclk_div, hpll_freq;
+	int vco, gmbus_freq = 0, cdclk_div;
 
 
 	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
 
-	/* Skip setting the gmbus freq if BIOS has already programmed it */
-	if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
-		return;
-
-	/* Obtain SKU information */
-	mutex_lock(&dev_priv->dpio_lock);
-	hpll_freq =
-		vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
-	mutex_unlock(&dev_priv->dpio_lock);
+	vco = valleyview_get_vco(dev_priv);
 
 
 	/* Get the CDCLK divide ratio */
 	/* Get the CDCLK divide ratio */
 	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
 	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
@@ -106,7 +97,7 @@ static void gmbus_set_freq(struct drm_i915_private *dev_priv)
 	 * in fact 1MHz is the correct frequency.
 	 * in fact 1MHz is the correct frequency.
 	 */
 	 */
 	if (cdclk_div)
 	if (cdclk_div)
-		gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+		gmbus_freq = (vco << 1) / cdclk_div;
 
 
 	if (WARN_ON(gmbus_freq == 0))
 	if (WARN_ON(gmbus_freq == 0))
 		return;
 		return;
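
A worked example of the GMBUS frequency computation above, gmbus_freq = (vco << 1) / cdclk_div; the divider value here is an assumption, since get_disp_clk_div() is not part of this hunk:

#include <stdio.h>

int main(void)
{
	int vco = 800;		/* MHz, from valleyview_get_vco() */
	int cdclk_div = 5;	/* illustrative only; the real divider comes
				 * from get_disp_clk_div(), not shown here */
	int gmbus_freq = 0;

	if (cdclk_div)
		gmbus_freq = (vco << 1) / cdclk_div;

	printf("gmbus_freq = %d\n", gmbus_freq);	/* prints 320 */
	return 0;
}
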

+ 3 - 2
drivers/gpu/drm/i915/intel_lvds.c

@@ -256,8 +256,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	POSTING_READ(lvds_encoder->reg);
 	POSTING_READ(lvds_encoder->reg);
 }
 }
 
 
-static int intel_lvds_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
 {
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

+ 13 - 30
drivers/gpu/drm/i915/intel_opregion.c

@@ -396,13 +396,10 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector *connector;
-	struct intel_connector *intel_connector = NULL;
-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
+	struct intel_connector *intel_connector;
+	struct intel_panel *panel;
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-	u32 ret = 0;
-	bool found = false;
 
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
 
@@ -414,38 +411,24 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 		return ASLC_BACKLIGHT_FAILED;
 		return ASLC_BACKLIGHT_FAILED;
 
 
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->mode_config.mutex);
+
 	/*
 	/*
-	 * Could match the OpRegion connector here instead, but we'd also need
-	 * to verify the connector could handle a backlight call.
+	 * Update backlight on all connectors that support backlight (usually
+	 * only one).
 	 */
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc == crtc) {
-			found = true;
-			break;
-		}
-
-	if (!found) {
-		ret = ASLC_BACKLIGHT_FAILED;
-		goto out;
-	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		if (connector->encoder == encoder)
-			intel_connector = to_intel_connector(connector);
-
-	if (!intel_connector) {
-		ret = ASLC_BACKLIGHT_FAILED;
-		goto out;
-	}
-
 	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
 	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
-	intel_panel_set_backlight(intel_connector, bclp, 255);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		panel = &intel_connector->panel;
+		if (panel->backlight.present)
+			intel_panel_set_backlight(intel_connector, bclp, 255);
+	}
 	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
 
-out:
 	mutex_unlock(&dev->mode_config.mutex);
 	mutex_unlock(&dev->mode_config.mutex);
 
 
-	return ret;
+
+	return 0;
 }
 }
 
 
 static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
 static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
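
A one-line worked example of the CBLV value written back above, DIV_ROUND_UP(bclp * 100, 255), which scales the firmware's 0-255 request to a percentage:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bclp = 128;	/* firmware-requested level, 0..255 */

	/* same scaling as the iowrite32(... | ASLE_CBLV_VALID, &asle->cblv) */
	printf("cblv = %u\n", DIV_ROUND_UP(bclp * 100, 255));	/* prints 51 */
	return 0;
}
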

+ 594 - 288
drivers/gpu/drm/i915/intel_panel.c

@@ -325,203 +325,170 @@ out:
 	pipe_config->gmch_pfit.lvds_border_bits = border;
 	pipe_config->gmch_pfit.lvds_border_bits = border;
 }
 }
 
 
-static int is_backlight_combination_mode(struct drm_device *dev)
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+	"(-1 force normal, 0 machine defaults, 1 force inversion), please "
+	"report PCI device ID, subsystem vendor and subsystem device ID "
+	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
+	"It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct intel_connector *connector,
+					  u32 val)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 
 
-	if (IS_GEN4(dev))
-		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+	WARN_ON(panel->backlight.max == 0);
 
 
-	if (IS_GEN2(dev))
-		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+	if (i915_panel_invert_brightness < 0)
+		return val;
 
 
-	return 0;
+	if (i915_panel_invert_brightness > 0 ||
+	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+		return panel->backlight.max - val;
+	}
+
+	return val;
 }
 }
 
 
-/* XXX: query mode clock or hardware clock and program max PWM appropriately
- * when it's 0.
- */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
+static u32 bdw_get_backlight(struct intel_connector *connector)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
-
-	WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
-
-	/* Restore the CTL value if it lost, e.g. GPU reset */
 
 
-	if (HAS_PCH_SPLIT(dev_priv->dev)) {
-		val = I915_READ(BLC_PWM_PCH_CTL2);
-		if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
-			dev_priv->regfile.saveBLC_PWM_CTL2 = val;
-		} else if (val == 0) {
-			val = dev_priv->regfile.saveBLC_PWM_CTL2;
-			I915_WRITE(BLC_PWM_PCH_CTL2, val);
-		}
-	} else if (IS_VALLEYVIEW(dev)) {
-		val = I915_READ(VLV_BLC_PWM_CTL(pipe));
-		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
-			dev_priv->regfile.saveBLC_PWM_CTL = val;
-			dev_priv->regfile.saveBLC_PWM_CTL2 =
-				I915_READ(VLV_BLC_PWM_CTL2(pipe));
-		} else if (val == 0) {
-			val = dev_priv->regfile.saveBLC_PWM_CTL;
-			I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
-			I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
-				   dev_priv->regfile.saveBLC_PWM_CTL2);
-		}
+	return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
 
 
-		if (!val)
-			val = 0x0f42ffff;
-	} else {
-		val = I915_READ(BLC_PWM_CTL);
-		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
-			dev_priv->regfile.saveBLC_PWM_CTL = val;
-			if (INTEL_INFO(dev)->gen >= 4)
-				dev_priv->regfile.saveBLC_PWM_CTL2 =
-					I915_READ(BLC_PWM_CTL2);
-		} else if (val == 0) {
-			val = dev_priv->regfile.saveBLC_PWM_CTL;
-			I915_WRITE(BLC_PWM_CTL, val);
-			if (INTEL_INFO(dev)->gen >= 4)
-				I915_WRITE(BLC_PWM_CTL2,
-					   dev_priv->regfile.saveBLC_PWM_CTL2);
-		}
-	}
+static u32 pch_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
-	return val;
+	return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 }
 
 
-static u32 intel_panel_get_max_backlight(struct drm_device *dev,
-					 enum pipe pipe)
+static u32 i9xx_get_backlight(struct intel_connector *connector)
 {
 {
-	u32 max;
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 val;
 
 
-	max = i915_read_blc_pwm_ctl(dev, pipe);
+	val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	if (INTEL_INFO(dev)->gen < 4)
+		val >>= 1;
 
 
-	if (HAS_PCH_SPLIT(dev)) {
-		max >>= 16;
-	} else {
-		if (INTEL_INFO(dev)->gen < 4)
-			max >>= 17;
-		else
-			max >>= 16;
+	if (panel->backlight.combination_mode) {
+		u8 lbpc;
 
 
-		if (is_backlight_combination_mode(dev))
-			max *= 0xff;
+		pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+		val *= lbpc;
 	}
 	}
 
 
-	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
-
-	return max;
+	return val;
 }
 }
 
 
-static int i915_panel_invert_brightness;
-MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
-	"(-1 force normal, 0 machine defaults, 1 force inversion), please "
-	"report PCI device ID, subsystem vendor and subsystem device ID "
-	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
-	"It will then be included in an upcoming module version.");
-module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev,
-					  enum pipe pipe, u32 val)
+static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
-	if (i915_panel_invert_brightness < 0)
-		return val;
+	return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
 
 
-	if (i915_panel_invert_brightness > 0 ||
-	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		u32 max = intel_panel_get_max_backlight(dev, pipe);
-		if (max)
-			return max - val;
-	}
+static u32 vlv_get_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 
 
-	return val;
+	return _vlv_get_backlight(dev, pipe);
 }
 }
 
 
-static u32 intel_panel_get_backlight(struct drm_device *dev,
-				     enum pipe pipe)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 	u32 val;
 	unsigned long flags;
 	unsigned long flags;
-	int reg;
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
 
-	if (HAS_PCH_SPLIT(dev)) {
-		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-	} else {
-		if (IS_VALLEYVIEW(dev))
-			reg = VLV_BLC_PWM_CTL(pipe);
-		else
-			reg = BLC_PWM_CTL;
-
-		val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
-		if (INTEL_INFO(dev)->gen < 4)
-			val >>= 1;
+	val = dev_priv->display.get_backlight(connector);
+	val = intel_panel_compute_brightness(connector, val);
 
 
-		if (is_backlight_combination_mode(dev)) {
-			u8 lbpc;
-
-			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
-			val *= lbpc;
-		}
-	}
-
-	val = intel_panel_compute_brightness(dev, pipe, val);
-
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 
 
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
 	return val;
 	return val;
 }
 }
 
 
-static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
 }
 }
 
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev,
-					       enum pipe pipe, u32 level)
+static void pch_set_backlight(struct intel_connector *connector, u32 level)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
 	u32 tmp;
-	int reg;
 
 
-	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
-	level = intel_panel_compute_brightness(dev, pipe, level);
+	tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+}
+
+static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 tmp, mask;
 
 
-	if (HAS_PCH_SPLIT(dev))
-		return intel_pch_panel_set_backlight(dev, level);
+	WARN_ON(panel->backlight.max == 0);
 
 
-	if (is_backlight_combination_mode(dev)) {
-		u32 max = intel_panel_get_max_backlight(dev, pipe);
+	if (panel->backlight.combination_mode) {
 		u8 lbpc;
 		u8 lbpc;
 
 
-		/* we're screwed, but keep behaviour backwards compatible */
-		if (!max)
-			max = 1;
-
-		lbpc = level * 0xfe / max + 1;
+		lbpc = level * 0xfe / panel->backlight.max + 1;
 		level /= lbpc;
 		level /= lbpc;
 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 	}
 	}
 
 
-	if (IS_VALLEYVIEW(dev))
-		reg = VLV_BLC_PWM_CTL(pipe);
-	else
-		reg = BLC_PWM_CTL;
-
-	tmp = I915_READ(reg);
-	if (INTEL_INFO(dev)->gen < 4)
+	if (IS_GEN4(dev)) {
+		mask = BACKLIGHT_DUTY_CYCLE_MASK;
+	} else {
 		level <<= 1;
 		level <<= 1;
-	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(reg, tmp | level);
+		mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+	}
+
+	tmp = I915_READ(BLC_PWM_CTL) & ~mask;
+	I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
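In combination mode the requested level is split between the PCI LBPC byte and the PWM duty cycle written above. A rough worked example, with made-up numbers rather than anything taken from the patch:

	/* Illustration only: raw PWM max = 100, so combination max = 100 * 0xff = 25500.
	 * For a requested level of 15300:
	 *   lbpc  = 15300 * 0xfe / 25500 + 1 = 153   (goes to the PCI_LBPC config byte)
	 *   level = 15300 / 153 = 100                (goes into BLC_PWM_CTL as the duty cycle)
	 */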
+
+static void vlv_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 tmp;
+
+	tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void
+intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+	level = intel_panel_compute_brightness(connector, level);
+	dev_priv->display.set_backlight(connector, level);
 }
 }
 
 
 /* set backlight brightness to level in range [0..max] */
 /* set backlight brightness to level in range [0..max] */
@@ -530,6 +497,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 {
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 freq;
 	u32 freq;
 	unsigned long flags;
 	unsigned long flags;
@@ -537,34 +505,77 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
 	if (pipe == INVALID_PIPE)
 	if (pipe == INVALID_PIPE)
 		return;
 		return;
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
 
-	freq = intel_panel_get_max_backlight(dev, pipe);
-	if (!freq) {
-		/* we are screwed, bail out */
-		goto out;
-	}
+	WARN_ON(panel->backlight.max == 0);
 
 
-	/* scale to hardware, but be careful to not overflow */
+	/* scale to hardware max, but be careful to not overflow */
+	freq = panel->backlight.max;
 	if (freq < max)
 	if (freq < max)
 		level = level * freq / max;
 		level = level * freq / max;
 	else
 	else
 		level = freq / max * level;
 		level = freq / max * level;
 
 
-	dev_priv->backlight.level = level;
-	if (dev_priv->backlight.device)
-		dev_priv->backlight.device->props.brightness = level;
+	panel->backlight.level = level;
+	if (panel->backlight.device)
+		panel->backlight.device->props.brightness = level;
 
 
-	if (dev_priv->backlight.enabled)
-		intel_panel_actually_set_backlight(dev, pipe, level);
-out:
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	if (panel->backlight.enabled)
+		intel_panel_actually_set_backlight(connector, level);
+
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
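The two branches of the scaling above compute the same ratio but order the operations so the 32-bit intermediate cannot overflow: when the hardware max (freq) is below the caller's range, multiplying first preserves precision; when it is above, dividing first keeps the product in range. A small illustration with invented values:

	/* freq = 200, max = 255, level = 128 (freq < max):
	 *   level * freq / max = 128 * 200 / 255 = 100
	 * freq = 7500000, max = 1000, level = 800 (freq >= max):
	 *   freq / max * level = 7500 * 800 = 6000000
	 *   (multiplying first, 800 * 7500000, would exceed 32 bits)
	 */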
+
+static void pch_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BLC_PWM_CPU_CTL2);
+	I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+	tmp = I915_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(struct intel_connector *connector)
+{
+	intel_panel_actually_set_backlight(connector, 0);
+}
+
+static void i965_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(BLC_PWM_CTL2);
+	I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 tmp;
+
+	intel_panel_actually_set_backlight(connector, 0);
+
+	tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
 }
 }
 
 
 void intel_panel_disable_backlight(struct intel_connector *connector)
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	unsigned long flags;
 	unsigned long flags;
 
 
@@ -582,141 +593,215 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
 		return;
 		return;
 	}
 	}
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
 
 
-	dev_priv->backlight.enabled = false;
-	intel_panel_actually_set_backlight(dev, pipe, 0);
+	panel->backlight.enabled = false;
+	dev_priv->display.disable_backlight(connector);
 
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		uint32_t reg, tmp;
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
 
 
-		if (HAS_PCH_SPLIT(dev))
-			reg = BLC_PWM_CPU_CTL2;
-		else if (IS_VALLEYVIEW(dev))
-			reg = VLV_BLC_PWM_CTL2(pipe);
-		else
-			reg = BLC_PWM_CTL2;
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 pch_ctl1, pch_ctl2;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	}
 
 
-		I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+	pch_ctl2 = panel->backlight.max << 16;
+	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
 
 
-		if (HAS_PCH_SPLIT(dev)) {
-			tmp = I915_READ(BLC_PWM_PCH_CTL1);
-			tmp &= ~BLM_PCH_PWM_ENABLE;
-			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-		}
-	}
+	pch_ctl1 = 0;
+	if (panel->backlight.active_low_pwm)
+		pch_ctl1 |= BLM_PCH_POLARITY;
 
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	/* BDW always uses the pch pwm controls. */
+	pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	POSTING_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+	/* This won't stick until the above enable. */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
 }
 }
 
 
-void intel_panel_enable_backlight(struct intel_connector *connector)
+static void pch_enable_backlight(struct intel_connector *connector)
 {
 {
 	struct drm_device *dev = connector->base.dev;
 	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	enum transcoder cpu_transcoder =
 	enum transcoder cpu_transcoder =
 		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
 		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
-	unsigned long flags;
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2;
 
 
-	if (pipe == INVALID_PIPE)
-		return;
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	if (cpu_ctl2 & BLM_PWM_ENABLE) {
+		WARN(1, "cpu backlight already enabled\n");
+		cpu_ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+	}
 
 
-	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	}
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	if (cpu_transcoder == TRANSCODER_EDP)
+		cpu_ctl2 = BLM_TRANSCODER_EDP;
+	else
+		cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+	POSTING_READ(BLC_PWM_CPU_CTL2);
+	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+	/* This won't stick until the above enable. */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	pch_ctl2 = panel->backlight.max << 16;
+	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+	pch_ctl1 = 0;
+	if (panel->backlight.active_low_pwm)
+		pch_ctl1 |= BLM_PCH_POLARITY;
+
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+	POSTING_READ(BLC_PWM_PCH_CTL1);
+	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
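The enable helpers all follow the same sequence: program the control register with the PWM enable bit still clear, flush that write with a posting read, and only then set the enable bit, so the pipe/transcoder selection and polarity are latched before the PWM starts. A minimal sketch of the pattern, with a hypothetical helper name that is not part of the patch:

static void enable_pwm(struct drm_i915_private *dev_priv, u32 reg, u32 ctl)
{
	I915_WRITE(reg, ctl);		/* configuration, enable bit clear */
	POSTING_READ(reg);		/* flush so the configuration lands first */
	I915_WRITE(reg, ctl | BLM_PWM_ENABLE);
}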
+
+static void i9xx_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, freq;
 
 
-	if (dev_priv->backlight.level == 0) {
-		dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
-									  pipe);
-		if (dev_priv->backlight.device)
-			dev_priv->backlight.device->props.brightness =
-				dev_priv->backlight.level;
+	ctl = I915_READ(BLC_PWM_CTL);
+	if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+		WARN(1, "backlight already enabled\n");
+		I915_WRITE(BLC_PWM_CTL, 0);
 	}
 	}
 
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		uint32_t reg, tmp;
+	freq = panel->backlight.max;
+	if (panel->backlight.combination_mode)
+		freq /= 0xff;
 
 
-		if (HAS_PCH_SPLIT(dev))
-			reg = BLC_PWM_CPU_CTL2;
-		else if (IS_VALLEYVIEW(dev))
-			reg = VLV_BLC_PWM_CTL2(pipe);
-		else
-			reg = BLC_PWM_CTL2;
+	ctl = freq << 17;
+	if (IS_GEN2(dev) && panel->backlight.combination_mode)
+		ctl |= BLM_LEGACY_MODE;
+	if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
+		ctl |= BLM_POLARITY_PNV;
 
 
-		tmp = I915_READ(reg);
+	I915_WRITE(BLC_PWM_CTL, ctl);
+	POSTING_READ(BLC_PWM_CTL);
 
 
-		/* Note that this can also get called through dpms changes. And
-		 * we don't track the backlight dpms state, hence check whether
-		 * we have to do anything first. */
-		if (tmp & BLM_PWM_ENABLE)
-			goto set_level;
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
 
 
-		if (INTEL_INFO(dev)->num_pipes == 3)
-			tmp &= ~BLM_PIPE_SELECT_IVB;
-		else
-			tmp &= ~BLM_PIPE_SELECT;
+static void i965_enable_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 ctl, ctl2, freq;
 
 
-		if (cpu_transcoder == TRANSCODER_EDP)
-			tmp |= BLM_TRANSCODER_EDP;
-		else
-			tmp |= BLM_PIPE(cpu_transcoder);
-		tmp &= ~BLM_PWM_ENABLE;
-
-		I915_WRITE(reg, tmp);
-		POSTING_READ(reg);
-		I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
-
-		if (HAS_PCH_SPLIT(dev) &&
-		    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
-			tmp = I915_READ(BLC_PWM_PCH_CTL1);
-			tmp |= BLM_PCH_PWM_ENABLE;
-			tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
-			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
-		}
+	ctl2 = I915_READ(BLC_PWM_CTL2);
+	if (ctl2 & BLM_PWM_ENABLE) {
+		WARN(1, "backlight already enabled\n");
+		ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_CTL2, ctl2);
 	}
 	}
 
 
-set_level:
-	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
-	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
-	 * registers are set.
-	 */
-	dev_priv->backlight.enabled = true;
-	intel_panel_actually_set_backlight(dev, pipe,
-					   dev_priv->backlight.level);
+	freq = panel->backlight.max;
+	if (panel->backlight.combination_mode)
+		freq /= 0xff;
+
+	ctl = freq << 16;
+	I915_WRITE(BLC_PWM_CTL, ctl);
+
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
 
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+	ctl2 = BLM_PIPE(pipe);
+	if (panel->backlight.combination_mode)
+		ctl2 |= BLM_COMBINATION_MODE;
+	if (panel->backlight.active_low_pwm)
+		ctl2 |= BLM_POLARITY_I965;
+	I915_WRITE(BLC_PWM_CTL2, ctl2);
+	POSTING_READ(BLC_PWM_CTL2);
+	I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
 }
 }
 
 
-/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
-static void intel_panel_init_backlight_regs(struct drm_device *dev)
+static void vlv_enable_backlight(struct intel_connector *connector)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 ctl, ctl2;
 
 
-	if (IS_VALLEYVIEW(dev)) {
-		enum pipe pipe;
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	if (ctl2 & BLM_PWM_ENABLE) {
+		WARN(1, "backlight already enabled\n");
+		ctl2 &= ~BLM_PWM_ENABLE;
+		I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+	}
 
 
-		for_each_pipe(pipe) {
-			u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+	ctl = panel->backlight.max << 16;
+	I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
 
 
-			/* Skip if the modulation freq is already set */
-			if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
-				continue;
+	/* XXX: combine this into above write? */
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
 
 
-			cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
-			I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
-				   cur_val);
-		}
-	}
+	ctl2 = 0;
+	if (panel->backlight.active_low_pwm)
+		ctl2 |= BLM_POLARITY_I965;
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+	POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
+	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
 }
 }
 
 
-static void intel_panel_init_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct intel_connector *connector)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	unsigned long flags;
+
+	if (pipe == INVALID_PIPE)
+		return;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+	WARN_ON(panel->backlight.max == 0);
 
 
-	intel_panel_init_backlight_regs(dev);
+	if (panel->backlight.level == 0) {
+		panel->backlight.level = panel->backlight.max;
+		if (panel->backlight.device)
+			panel->backlight.device->props.brightness =
+				panel->backlight.level;
+	}
+
+	dev_priv->display.enable_backlight(connector);
+	panel->backlight.enabled = true;
 
 
-	dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
-	dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
 }
 
 
 enum drm_connector_status
 enum drm_connector_status
@@ -742,7 +827,7 @@ intel_panel_detect(struct drm_device *dev)
 }
 }
 
 
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static int intel_panel_update_status(struct backlight_device *bd)
+static int intel_backlight_device_update_status(struct backlight_device *bd)
 {
 {
 	struct intel_connector *connector = bl_get_data(bd);
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
 	struct drm_device *dev = connector->base.dev;
@@ -756,85 +841,306 @@ static int intel_panel_update_status(struct backlight_device *bd)
 	return 0;
 	return 0;
 }
 }
 
 
-static int intel_panel_get_brightness(struct backlight_device *bd)
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
 {
 	struct intel_connector *connector = bl_get_data(bd);
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
 	struct drm_device *dev = connector->base.dev;
-	enum pipe pipe;
+	int ret;
 
 
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->mode_config.mutex);
-	pipe = intel_get_pipe_from_connector(connector);
+	ret = intel_panel_get_backlight(connector);
 	mutex_unlock(&dev->mode_config.mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	if (pipe == INVALID_PIPE)
-		return 0;
 
 
-	return intel_panel_get_backlight(connector->base.dev, pipe);
+	return ret;
 }
 }
 
 
-static const struct backlight_ops intel_panel_bl_ops = {
-	.update_status = intel_panel_update_status,
-	.get_brightness = intel_panel_get_brightness,
+static const struct backlight_ops intel_backlight_device_ops = {
+	.update_status = intel_backlight_device_update_status,
+	.get_brightness = intel_backlight_device_get_brightness,
 };
 };
 
 
-int intel_panel_setup_backlight(struct drm_connector *connector)
+static int intel_backlight_device_register(struct intel_connector *connector)
 {
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
 	struct backlight_properties props;
 	struct backlight_properties props;
-	unsigned long flags;
 
 
-	intel_panel_init_backlight(dev);
-
-	if (WARN_ON(dev_priv->backlight.device))
+	if (WARN_ON(panel->backlight.device))
 		return -ENODEV;
 		return -ENODEV;
 
 
+	BUG_ON(panel->backlight.max == 0);
+
 	memset(&props, 0, sizeof(props));
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.type = BACKLIGHT_RAW;
-	props.brightness = dev_priv->backlight.level;
+	props.brightness = panel->backlight.level;
+	props.max_brightness = panel->backlight.max;
 
 
-	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-	props.max_brightness = intel_panel_get_max_backlight(dev, 0);
-	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
-
-	if (props.max_brightness == 0) {
-		DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
-		return -ENODEV;
-	}
-	dev_priv->backlight.device =
+	/*
+	 * Note: using the same name independent of the connector prevents
+	 * registration of multiple backlight devices in the driver.
+	 */
+	panel->backlight.device =
 		backlight_device_register("intel_backlight",
 		backlight_device_register("intel_backlight",
-					  connector->kdev,
-					  to_intel_connector(connector),
-					  &intel_panel_bl_ops, &props);
+					  connector->base.kdev,
+					  connector,
+					  &intel_backlight_device_ops, &props);
 
 
-	if (IS_ERR(dev_priv->backlight.device)) {
+	if (IS_ERR(panel->backlight.device)) {
 		DRM_ERROR("Failed to register backlight: %ld\n",
 		DRM_ERROR("Failed to register backlight: %ld\n",
-			  PTR_ERR(dev_priv->backlight.device));
-		dev_priv->backlight.device = NULL;
+			  PTR_ERR(panel->backlight.device));
+		panel->backlight.device = NULL;
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
 	return 0;
 	return 0;
 }
 }
 
 
-void intel_panel_destroy_backlight(struct drm_device *dev)
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	if (panel->backlight.device) {
+		backlight_device_unregister(panel->backlight.device);
+		panel->backlight.device = NULL;
+	}
+}
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static int intel_backlight_device_register(struct intel_connector *connector)
+{
+	return 0;
+}
+static void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ *
+ * XXX: Query mode clock or hardware clock and program PWM modulation frequency
+ * appropriately when it's 0. Use VBT and/or sane defaults.
+ */
+static int bdw_setup_backlight(struct intel_connector *connector)
 {
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (dev_priv->backlight.device) {
-		backlight_device_unregister(dev_priv->backlight.device);
-		dev_priv->backlight.device = NULL;
+	struct intel_panel *panel = &connector->panel;
+	u32 pch_ctl1, pch_ctl2, val;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	panel->backlight.max = pch_ctl2 >> 16;
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = bdw_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
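As the setup hook reads it, BLC_PWM_PCH_CTL2 packs the modulation frequency (the backlight max) in bits 31:16 and the current duty cycle in bits 15:0, hence the plain shift above. A hedged decoding example with a made-up register value:

	/* Example only: if BLC_PWM_PCH_CTL2 reads 0x12c00960,
	 *   max  = 0x12c00960 >> 16     = 0x12c0 = 4800
	 *   duty = 0x12c00960 & 0xffff  = 0x0960 = 2400  (about 50% brightness)
	 */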
+
+static int pch_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+
+	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	panel->backlight.max = pch_ctl2 >> 16;
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = pch_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+		(pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, val;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+
+	if (IS_GEN2(dev))
+		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+	if (IS_PINEVIEW(dev))
+		panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+	panel->backlight.max = ctl >> 17;
+	if (panel->backlight.combination_mode)
+		panel->backlight.max *= 0xff;
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = i9xx_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	u32 ctl, ctl2, val;
+
+	ctl2 = I915_READ(BLC_PWM_CTL2);
+	panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+	ctl = I915_READ(BLC_PWM_CTL);
+	panel->backlight.max = ctl >> 16;
+	if (panel->backlight.combination_mode)
+		panel->backlight.max *= 0xff;
+
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = i9xx_get_backlight(connector);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe;
+	u32 ctl, ctl2, val;
+
+	for_each_pipe(pipe) {
+		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+		/* Skip if the modulation freq is already set */
+		if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+			continue;
+
+		cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+		I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+			   cur_val);
 	}
 	}
+
+	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+	ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+	panel->backlight.max = ctl >> 16;
+	if (!panel->backlight.max)
+		return -ENODEV;
+
+	val = _vlv_get_backlight(dev, PIPE_A);
+	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
+		panel->backlight.level != 0;
+
+	return 0;
 }
 }
-#else
+
 int intel_panel_setup_backlight(struct drm_connector *connector)
 int intel_panel_setup_backlight(struct drm_connector *connector)
 {
 {
-	intel_panel_init_backlight(connector->dev);
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_panel *panel = &intel_connector->panel;
+	unsigned long flags;
+	int ret;
+
+	/* set level and max in panel struct */
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+	ret = dev_priv->display.setup_backlight(intel_connector);
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+
+	if (ret) {
+		DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
+			      drm_get_connector_name(connector));
+		return ret;
+	}
+
+	intel_backlight_device_register(intel_connector);
+
+	panel->backlight.present = true;
+
+	DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
+		      "sysfs interface %sregistered\n",
+		      panel->backlight.enabled ? "enabled" : "disabled",
+		      panel->backlight.level, panel->backlight.max,
+		      panel->backlight.device ? "" : "not ");
+
 	return 0;
 	return 0;
 }
 }
 
 
-void intel_panel_destroy_backlight(struct drm_device *dev)
+void intel_panel_destroy_backlight(struct drm_connector *connector)
 {
 {
-	return;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_panel *panel = &intel_connector->panel;
+
+	panel->backlight.present = false;
+	intel_backlight_device_unregister(intel_connector);
+}
+
+/* Set up chip specific backlight functions */
+void intel_panel_init_backlight_funcs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_BROADWELL(dev)) {
+		dev_priv->display.setup_backlight = bdw_setup_backlight;
+		dev_priv->display.enable_backlight = bdw_enable_backlight;
+		dev_priv->display.disable_backlight = pch_disable_backlight;
+		dev_priv->display.set_backlight = bdw_set_backlight;
+		dev_priv->display.get_backlight = bdw_get_backlight;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.setup_backlight = pch_setup_backlight;
+		dev_priv->display.enable_backlight = pch_enable_backlight;
+		dev_priv->display.disable_backlight = pch_disable_backlight;
+		dev_priv->display.set_backlight = pch_set_backlight;
+		dev_priv->display.get_backlight = pch_get_backlight;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.setup_backlight = vlv_setup_backlight;
+		dev_priv->display.enable_backlight = vlv_enable_backlight;
+		dev_priv->display.disable_backlight = vlv_disable_backlight;
+		dev_priv->display.set_backlight = vlv_set_backlight;
+		dev_priv->display.get_backlight = vlv_get_backlight;
+	} else if (IS_GEN4(dev)) {
+		dev_priv->display.setup_backlight = i965_setup_backlight;
+		dev_priv->display.enable_backlight = i965_enable_backlight;
+		dev_priv->display.disable_backlight = i965_disable_backlight;
+		dev_priv->display.set_backlight = i9xx_set_backlight;
+		dev_priv->display.get_backlight = i9xx_get_backlight;
+	} else {
+		dev_priv->display.setup_backlight = i9xx_setup_backlight;
+		dev_priv->display.enable_backlight = i9xx_enable_backlight;
+		dev_priv->display.disable_backlight = i9xx_disable_backlight;
+		dev_priv->display.set_backlight = i9xx_set_backlight;
+		dev_priv->display.get_backlight = i9xx_get_backlight;
+	}
 }
 }
-#endif
 
 
 int intel_panel_init(struct intel_panel *panel,
 int intel_panel_init(struct intel_panel *panel,
 		     struct drm_display_mode *fixed_mode)
 		     struct drm_display_mode *fixed_mode)

+ 224 - 172
drivers/gpu/drm/i915/intel_pm.c

@@ -191,7 +191,11 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 	u32 blt_ecoskpd;
 	u32 blt_ecoskpd;
 
 
 	/* Make sure blitter notifies FBC of writes */
 	/* Make sure blitter notifies FBC of writes */
-	gen6_gt_force_wake_get(dev_priv);
+
+	/* The blitter is part of the media power well on VLV; this parameter
+	 * has no effect on other platforms for now */
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
+
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
 		GEN6_BLITTER_LOCK_SHIFT;
 		GEN6_BLITTER_LOCK_SHIFT;
@@ -202,7 +206,8 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
 			 GEN6_BLITTER_LOCK_SHIFT);
 			 GEN6_BLITTER_LOCK_SHIFT);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
-	gen6_gt_force_wake_put(dev_priv);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
 }
 }
 
 
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -222,7 +227,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
 	/* Set persistent mode for front-buffer rendering, ala X. */
 	/* Set persistent mode for front-buffer rendering, ala X. */
 	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
 	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
-	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+	dpfc_ctl |= DPFC_CTL_FENCE_EN;
+	if (IS_GEN5(dev))
+		dpfc_ctl |= obj->fence_reg;
 	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
 
 	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
 	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
@@ -295,7 +302,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
 
 	sandybridge_blit_fbc_update(dev);
 	sandybridge_blit_fbc_update(dev);
 
 
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
 }
 
 
 bool intel_fbc_enabled(struct drm_device *dev)
 bool intel_fbc_enabled(struct drm_device *dev)
@@ -3430,26 +3437,19 @@ static void ironlake_disable_drps(struct drm_device *dev)
  * ourselves, instead of doing a rmw cycle (which might result in us clearing
  * ourselves, instead of doing a rmw cycle (which might result in us clearing
  * all limits and the gpu stuck at whatever frequency it is at atm).
  * all limits and the gpu stuck at whatever frequency it is at atm).
  */
  */
-static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
 {
 	u32 limits;
 	u32 limits;
 
 
-	limits = 0;
-
-	if (*val >= dev_priv->rps.max_delay)
-		*val = dev_priv->rps.max_delay;
-	limits |= dev_priv->rps.max_delay << 24;
-
 	/* Only set the down limit when we've reached the lowest level to avoid
 	/* Only set the down limit when we've reached the lowest level to avoid
 	 * getting more interrupts, otherwise leave this clear. This prevents a
 	 * getting more interrupts, otherwise leave this clear. This prevents a
 	 * race in the hw when coming out of rc6: There's a tiny window where
 	 * race in the hw when coming out of rc6: There's a tiny window where
 	 * the hw runs at the minimal clock before selecting the desired
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
 	 * receive a down interrupt. */
-	if (*val <= dev_priv->rps.min_delay) {
-		*val = dev_priv->rps.min_delay;
+	limits = dev_priv->rps.max_delay << 24;
+	if (val <= dev_priv->rps.min_delay)
 		limits |= dev_priv->rps.min_delay << 16;
 		limits |= dev_priv->rps.min_delay << 16;
-	}
 
 
 	return limits;
 	return limits;
 }
 }
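With the old pointer-based clamping gone, the helper only encodes the software limits into the GEN6_RP_INTERRUPT_LIMITS layout: the max delay in bits 31:24, and the min delay in bits 23:16 once the requested value has reached the floor. A worked example with illustrative delay values:

	/* max_delay = 0x10, min_delay = 0x05:
	 *   val above the floor: limits = 0x10 << 24                 = 0x10000000
	 *   val == min_delay:    limits = 0x10 << 24 | 0x05 << 16    = 0x10050000
	 */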
@@ -3549,7 +3549,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 limits = gen6_rps_limits(dev_priv, &val);
 
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val > dev_priv->rps.max_delay);
@@ -3572,7 +3571,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	/* Make sure we continue to get interrupts
 	/* Make sure we continue to get interrupts
 	 * until we hit the minimum or maximum frequencies.
 	 * until we hit the minimum or maximum frequencies.
 	 */
 	 */
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   gen6_rps_limits(dev_priv, val));
 
 
 	POSTING_READ(GEN6_RPNSWREQ);
 	POSTING_READ(GEN6_RPNSWREQ);
 
 
@@ -3607,48 +3607,18 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->rps.hw_lock);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 }
 
 
-/*
- * Wait until the previous freq change has completed,
- * or the timeout elapsed, and then update our notion
- * of the current GPU frequency.
- */
-static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
-{
-	u32 pval;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-	if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
-		DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-
-	pval >>= 8;
-
-	if (pval != dev_priv->rps.cur_delay)
-		DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n",
-				 vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
-				 dev_priv->rps.cur_delay,
-				 vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
-
-	dev_priv->rps.cur_delay = pval;
-}
-
 void valleyview_set_rps(struct drm_device *dev, u8 val)
 void valleyview_set_rps(struct drm_device *dev, u8 val)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
-	gen6_rps_limits(dev_priv, &val);
-
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val > dev_priv->rps.max_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);
 	WARN_ON(val < dev_priv->rps.min_delay);
 
 
-	vlv_update_rps_cur_delay(dev_priv);
-
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
 	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.cur_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
 			 dev_priv->rps.cur_delay,
 			 dev_priv->rps.cur_delay,
-			 vlv_gpu_freq(dev_priv->mem_freq, val), val);
+			 vlv_gpu_freq(dev_priv, val), val);
 
 
 	if (val == dev_priv->rps.cur_delay)
 	if (val == dev_priv->rps.cur_delay)
 		return;
 		return;
@@ -3657,7 +3627,7 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 
 
 	dev_priv->rps.cur_delay = val;
 	dev_priv->rps.cur_delay = val;
 
 
-	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 }
 
 
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
@@ -3775,7 +3745,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 
 	/* 1c & 1d: Get forcewake during program sequence. Although the driver
 	/* 1c & 1d: Get forcewake during program sequence. Although the driver
 	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
 	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 
 	/* 2a: Disable RC states. */
 	/* 2a: Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3832,7 +3802,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 
 	gen6_enable_rps_interrupts(dev);
 	gen6_enable_rps_interrupts(dev);
 
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 }
 
 
 static void gen6_enable_rps(struct drm_device *dev)
 static void gen6_enable_rps(struct drm_device *dev)
@@ -3862,7 +3832,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 	}
 
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
@@ -3954,7 +3924,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
 			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
 	}
 	}
 
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 }
 
 
 void gen6_update_ring_freq(struct drm_device *dev)
 void gen6_update_ring_freq(struct drm_device *dev)
@@ -4116,7 +4086,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 
 	valleyview_setup_pctx(dev);
 	valleyview_setup_pctx(dev);
 
 
-	gen6_gt_force_wake_get(dev_priv);
+	/* On VLV this force-wakes all power wells; other platforms take the regular path */
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -4140,7 +4111,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 	for_each_ring(ring, dev_priv, i)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 
-	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
 
 
 	/* allows RC6 residency counter to work */
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
 	I915_WRITE(VLV_COUNTER_CONTROL,
@@ -4148,65 +4119,47 @@ static void valleyview_enable_rps(struct drm_device *dev)
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
 				      VLV_RENDER_RC6_COUNT_EN));
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-		rc6_mode = GEN7_RC_CTL_TO_MODE;
+		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
 
 	intel_print_rc6_info(dev, rc6_mode);
 	intel_print_rc6_info(dev, rc6_mode);
 
 
 	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-	switch ((val >> 6) & 3) {
-	case 0:
-	case 1:
-		dev_priv->mem_freq = 800;
-		break;
-	case 2:
-		dev_priv->mem_freq = 1066;
-		break;
-	case 3:
-		dev_priv->mem_freq = 1333;
-		break;
-	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
 
 	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
 	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
 	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
 	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.cur_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay),
 			 dev_priv->rps.cur_delay);
 			 dev_priv->rps.cur_delay);
 
 
 	dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
 	dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay;
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay;
 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.max_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay),
 			 dev_priv->rps.max_delay);
 			 dev_priv->rps.max_delay);
 
 
 	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
 	dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.rpe_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
 			 dev_priv->rps.rpe_delay);
 			 dev_priv->rps.rpe_delay);
 
 
 	dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
 	dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.min_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay),
 			 dev_priv->rps.min_delay);
 			 dev_priv->rps.min_delay);
 
 
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
 	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
-			 vlv_gpu_freq(dev_priv->mem_freq,
-				      dev_priv->rps.rpe_delay),
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay),
 			 dev_priv->rps.rpe_delay);
 			 dev_priv->rps.rpe_delay);
 
 
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
 
 	gen6_enable_rps_interrupts(dev);
 	gen6_enable_rps_interrupts(dev);
 
 
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 }
 
 
 void ironlake_teardown_rc6(struct drm_device *dev)
 void ironlake_teardown_rc6(struct drm_device *dev)
@@ -5463,6 +5416,26 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 static void valleyview_init_clock_gating(struct drm_device *dev)
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	switch ((val >> 6) & 3) {
+	case 0:
+		dev_priv->mem_freq = 800;
+		break;
+	case 1:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 2:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 3:
+		dev_priv->mem_freq = 1333;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
 
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 
@@ -5642,49 +5615,78 @@ void intel_suspend_hw(struct drm_device *dev)
 		lpt_suspend_hw(dev);
 		lpt_suspend_hw(dev);
 }
 }
 
 
-static bool is_always_on_power_domain(struct drm_device *dev,
-				      enum intel_display_power_domain domain)
-{
-	unsigned long always_on_domains;
-
-	BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
+	for (i = 0;							\
+	     i < (power_domains)->power_well_count &&			\
+		 ((power_well) = &(power_domains)->power_wells[i]);	\
+	     i++)							\
+		if ((power_well)->domains & (domain_mask))
 
 
-	if (IS_BROADWELL(dev)) {
-		always_on_domains = BDW_ALWAYS_ON_POWER_DOMAINS;
-	} else if (IS_HASWELL(dev)) {
-		always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
-	} else {
-		WARN_ON(1);
-		return true;
-	}
-
-	return BIT(domain) & always_on_domains;
-}
+#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
+	for (i = (power_domains)->power_well_count - 1;			 \
+	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
+	     i--)							 \
+		if ((power_well)->domains & (domain_mask))
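Both iterators use the dangling-if trick: the statement the caller writes after the macro becomes the body of the trailing if, so only wells whose ->domains mask intersects the requested domain_mask are visited, and the _rev variant walks the array backwards for tear-down order. Roughly, a call like the one in intel_display_power_get further down expands to:

	for (i = 0; i < power_domains->power_well_count &&
		    (power_well = &power_domains->power_wells[i]); i++)
		if (power_well->domains & BIT(domain))
			__intel_power_well_get(dev, power_well);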
 
 
 /**
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
  * enable it, so check if it's enabled and also check if we've requested it to
  * be enabled.
  * be enabled.
  */
  */
+static bool hsw_power_well_enabled(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(HSW_PWR_WELL_DRIVER) ==
+		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+}
+
+bool intel_display_power_enabled_sw(struct drm_device *dev,
+				    enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	power_domains = &dev_priv->power_domains;
+
+	return power_domains->domain_use_count[domain];
+}
+
 bool intel_display_power_enabled(struct drm_device *dev,
 bool intel_display_power_enabled(struct drm_device *dev,
 				 enum intel_display_power_domain domain)
 				 enum intel_display_power_domain domain)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+	struct i915_power_well *power_well;
+	bool is_enabled;
+	int i;
 
 
-	if (!HAS_POWER_WELL(dev))
-		return true;
+	power_domains = &dev_priv->power_domains;
 
 
-	if (is_always_on_power_domain(dev, domain))
-		return true;
+	is_enabled = true;
 
 
-	return I915_READ(HSW_PWR_WELL_DRIVER) ==
-		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
+	mutex_lock(&power_domains->lock);
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+		if (power_well->always_on)
+			continue;
+
+		if (!power_well->is_enabled(dev, power_well)) {
+			is_enabled = false;
+			break;
+		}
+	}
+	mutex_unlock(&power_domains->lock);
+
+	return is_enabled;
 }
 }
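The check walks the wells from the highest index down and reports a domain as powered only if every non-always-on well covering it is enabled. A hedged usage sketch; the call site is hypothetical, though the domain enum values exist in i915_drv.h:

	if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE_B))
		return;	/* don't touch registers behind a powered-down well */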
 
 
-static void __intel_set_power_well(struct drm_device *dev, bool enable)
+static void hsw_set_power_well(struct drm_device *dev,
+			       struct i915_power_well *power_well, bool enable)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool is_enabled, enable_requested;
 	bool is_enabled, enable_requested;
+	unsigned long irqflags;
 	uint32_t tmp;
 	uint32_t tmp;
 
 
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5702,9 +5704,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 				      HSW_PWR_WELL_STATE_ENABLED), 20))
 				      HSW_PWR_WELL_STATE_ENABLED), 20))
 				DRM_ERROR("Timeout enabling power well\n");
 				DRM_ERROR("Timeout enabling power well\n");
 		}
 		}
+
+		if (IS_BROADWELL(dev)) {
+			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+				   dev_priv->de_irq_mask[PIPE_B]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+				   ~dev_priv->de_irq_mask[PIPE_B] |
+				   GEN8_PIPE_VBLANK);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+				   dev_priv->de_irq_mask[PIPE_C]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+				   ~dev_priv->de_irq_mask[PIPE_C] |
+				   GEN8_PIPE_VBLANK);
+			POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		}
 	} else {
 	} else {
 		if (enable_requested) {
 		if (enable_requested) {
-			unsigned long irqflags;
 			enum pipe p;
 			enum pipe p;
 
 
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5731,16 +5748,17 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 static void __intel_power_well_get(struct drm_device *dev,
 static void __intel_power_well_get(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 				   struct i915_power_well *power_well)
 {
 {
-	if (!power_well->count++)
-		__intel_set_power_well(dev, true);
+	if (!power_well->count++ && power_well->set)
+		power_well->set(dev, power_well, true);
 }
 }
 
 
 static void __intel_power_well_put(struct drm_device *dev,
 static void __intel_power_well_put(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 				   struct i915_power_well *power_well)
 {
 {
 	WARN_ON(!power_well->count);
 	WARN_ON(!power_well->count);
-	if (!--power_well->count && i915_disable_power_well)
-		__intel_set_power_well(dev, false);
+
+	if (!--power_well->count && power_well->set && i915_disable_power_well)
+		power_well->set(dev, power_well, false);
 }
 }
 
 
 void intel_display_power_get(struct drm_device *dev,
 void intel_display_power_get(struct drm_device *dev,
@@ -5748,17 +5766,18 @@ void intel_display_power_get(struct drm_device *dev,
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains;
 	struct i915_power_domains *power_domains;
-
-	if (!HAS_POWER_WELL(dev))
-		return;
-
-	if (is_always_on_power_domain(dev, domain))
-		return;
+	struct i915_power_well *power_well;
+	int i;
 
 
 	power_domains = &dev_priv->power_domains;
 	power_domains = &dev_priv->power_domains;
 
 
 	mutex_lock(&power_domains->lock);
 	mutex_lock(&power_domains->lock);
-	__intel_power_well_get(dev, &power_domains->power_wells[0]);
+
+	for_each_power_well(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_get(dev, power_well);
+
+	power_domains->domain_use_count[domain]++;
+
 	mutex_unlock(&power_domains->lock);
 	mutex_unlock(&power_domains->lock);
 }
 }
 
 
@@ -5767,17 +5786,19 @@ void intel_display_power_put(struct drm_device *dev,
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains;
 	struct i915_power_domains *power_domains;
-
-	if (!HAS_POWER_WELL(dev))
-		return;
-
-	if (is_always_on_power_domain(dev, domain))
-		return;
+	struct i915_power_well *power_well;
+	int i;
 
 
 	power_domains = &dev_priv->power_domains;
 	power_domains = &dev_priv->power_domains;
 
 
 	mutex_lock(&power_domains->lock);
 	mutex_lock(&power_domains->lock);
-	__intel_power_well_put(dev, &power_domains->power_wells[0]);
+
+	WARN_ON(!power_domains->domain_use_count[domain]);
+	power_domains->domain_use_count[domain]--;
+
+	for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+		__intel_power_well_put(dev, power_well);
+
 	mutex_unlock(&power_domains->lock);
 	mutex_unlock(&power_domains->lock);
 }
 }
 
 
@@ -5793,10 +5814,7 @@ void i915_request_power_well(void)
 
 
 	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
 	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
 				power_domains);
 				power_domains);
-
-	mutex_lock(&hsw_pwr->lock);
-	__intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
-	mutex_unlock(&hsw_pwr->lock);
+	intel_display_power_get(dev_priv->dev, POWER_DOMAIN_AUDIO);
 }
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
 
@@ -5810,24 +5828,71 @@ void i915_release_power_well(void)
 
 
 	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
 	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
 				power_domains);
 				power_domains);
-
-	mutex_lock(&hsw_pwr->lock);
-	__intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
-	mutex_unlock(&hsw_pwr->lock);
+	intel_display_power_put(dev_priv->dev, POWER_DOMAIN_AUDIO);
 }
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
 
+static struct i915_power_well i9xx_always_on_power_well[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = POWER_DOMAIN_MASK,
+	},
+};
+
+static struct i915_power_well hsw_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+	},
+	{
+		.name = "display",
+		.domains = POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS,
+		.is_enabled = hsw_power_well_enabled,
+		.set = hsw_set_power_well,
+	},
+};
+
+static struct i915_power_well bdw_power_wells[] = {
+	{
+		.name = "always-on",
+		.always_on = 1,
+		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+	},
+	{
+		.name = "display",
+		.domains = POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS,
+		.is_enabled = hsw_power_well_enabled,
+		.set = hsw_set_power_well,
+	},
+};
+
+#define set_power_wells(power_domains, __power_wells) ({		\
+	(power_domains)->power_wells = (__power_wells);			\
+	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
+})
+
 int intel_power_domains_init(struct drm_device *dev)
 int intel_power_domains_init(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	struct i915_power_well *power_well;
 
 
 	mutex_init(&power_domains->lock);
 	mutex_init(&power_domains->lock);
-	hsw_pwr = power_domains;
 
 
-	power_well = &power_domains->power_wells[0];
-	power_well->count = 0;
+	/*
+	 * The enabling order will be from lower to higher indexed wells,
+	 * the disabling order is reversed.
+	 */
+	if (IS_HASWELL(dev)) {
+		set_power_wells(power_domains, hsw_power_wells);
+		hsw_pwr = power_domains;
+	} else if (IS_BROADWELL(dev)) {
+		set_power_wells(power_domains, bdw_power_wells);
+		hsw_pwr = power_domains;
+	} else {
+		set_power_wells(power_domains, i9xx_always_on_power_well);
+	}
 
 
 	return 0;
 	return 0;
 }
 }
@@ -5842,15 +5907,13 @@ static void intel_power_domains_resume(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	struct i915_power_well *power_well;
 	struct i915_power_well *power_well;
-
-	if (!HAS_POWER_WELL(dev))
-		return;
+	int i;
 
 
 	mutex_lock(&power_domains->lock);
 	mutex_lock(&power_domains->lock);
-
-	power_well = &power_domains->power_wells[0];
-	__intel_set_power_well(dev, power_well->count > 0);
-
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		if (power_well->set)
+			power_well->set(dev, power_well, power_well->count > 0);
+	}
 	mutex_unlock(&power_domains->lock);
 	mutex_unlock(&power_domains->lock);
 }
 }
 
 
@@ -5864,13 +5927,13 @@ void intel_power_domains_init_hw(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 
-	if (!HAS_POWER_WELL(dev))
-		return;
-
 	/* For now, we need the power well to be always enabled. */
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev, true);
 	intel_display_set_init_power(dev, true);
 	intel_power_domains_resume(dev);
 	intel_power_domains_resume(dev);
 
 
+	if (!(IS_HASWELL(dev) || IS_BROADWELL(dev)))
+		return;
+
 	/* We're taking over the BIOS, so clear any requests made by it since
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
 	 * the driver is in charge now. */
 	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
 	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
@@ -6075,59 +6138,48 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 	return 0;
 	return 0;
 }
 }
 
 
-int vlv_gpu_freq(int ddr_freq, int val)
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 {
-	int mult, base;
+	int div;
 
 
-	switch (ddr_freq) {
+	/* 4 x czclk */
+	switch (dev_priv->mem_freq) {
 	case 800:
 	case 800:
-		mult = 20;
-		base = 120;
+		div = 10;
 		break;
 		break;
 	case 1066:
 	case 1066:
-		mult = 22;
-		base = 133;
+		div = 12;
 		break;
 		break;
 	case 1333:
 	case 1333:
-		mult = 21;
-		base = 125;
+		div = 16;
 		break;
 		break;
 	default:
 	default:
 		return -1;
 		return -1;
 	}
 	}
 
 
-	return ((val - 0xbd) * mult) + base;
+	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
 }
 }
 
 
-int vlv_freq_opcode(int ddr_freq, int val)
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 {
-	int mult, base;
+	int mul;
 
 
-	switch (ddr_freq) {
+	/* 4 x czclk */
+	switch (dev_priv->mem_freq) {
 	case 800:
-		mult = 20;
-		base = 120;
+		mul = 10;
 		break;
 	case 1066:
-		mult = 22;
-		base = 133;
+		mul = 12;
 		break;
 	case 1333:
-		mult = 21;
-		base = 125;
+		mul = 16;
 		break;
 	default:
 		return -1;
 	}
 
-	val /= mult;
-	val -= base / mult;
-	val += 0xbd;
-
-	if (val > 0xea)
-		val = 0xea;
-
-	return val;
+	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }

 void intel_pm_init(struct drm_device *dev)

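The rewritten helpers derive everything from the CZ clock (mem_freq / 4) rather than a per-DDR-speed multiplier/base pair, and they now stay exact inverses of each other. As a worked example with mem_freq = 1066 (div = mul = 12): vlv_gpu_freq() maps the opcode 0xd0 (208) to DIV_ROUND_CLOSEST(1066 * (208 + 6 - 0xbd), 4 * 12) = DIV_ROUND_CLOSEST(26650, 48) = 555 MHz, and vlv_freq_opcode() maps 555 back to DIV_ROUND_CLOSEST(4 * 12 * 555, 1066) + 0xbd - 6 = 25 + 183 = 208 = 0xd0.
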
+ 9 - 13
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -285,14 +285,16 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
 	if (!ring->fbc_dirty)
 		return 0;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
-	intel_ring_emit(ring, MI_NOOP);
 	/* WaFbcNukeOn3DBlt:ivb/hsw */
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, MSG_FBC_REND_STATE);
 	intel_ring_emit(ring, value);
+	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+	intel_ring_emit(ring, MSG_FBC_REND_STATE);
+	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	intel_ring_advance(ring);

 	ring->fbc_dirty = false;
@@ -354,7 +356,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	if (flush_domains)
+	if (!invalidate_domains && flush_domains)
 		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

 	return 0;
@@ -436,7 +438,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	int ret = 0;
 	u32 head;
 
-	gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
@@ -509,7 +511,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

 out:
-	gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
 	return ret;
 }
@@ -965,6 +967,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	} else if (IS_GEN6(ring->dev)) {
 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 	} else {
+		/* XXX: gen8 returns to sanity */
 		mmio = RING_HWS_PGA(ring->mmio_base);
 	}
 
@@ -1029,11 +1032,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 	       return false;
 
-	/* It looks like we need to prevent the gt from suspending while waiting
-	 * for an notifiy irq, otherwise irqs seem to get lost on at least the
-	 * blt/bsd rings on ivb. */
-	gen6_gt_force_wake_get(dev_priv);
-
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_DPF(dev) && ring->id == RCS)
@@ -1065,8 +1063,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	gen6_gt_force_wake_put(dev_priv);
 }

 static bool
@@ -1837,7 +1833,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 	}
 	intel_ring_advance(ring);
 
-	if (IS_GEN7(dev) && flush)
+	if (IS_GEN7(dev) && !invalidate && flush)
 		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

 	return 0;

+ 40 - 20
drivers/gpu/drm/i915/intel_sdvo.c

@@ -413,23 +413,34 @@ static const struct _sdvo_cmd_name {
 static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 				   const void *args, int args_len)
 {
-	int i;
+	int i, pos = 0;
+#define BUF_LEN 256
+	char buffer[BUF_LEN];
+
+#define BUF_PRINT(args...) \
+	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
 
-	DRM_DEBUG_KMS("%s: W: %02X ",
-				SDVO_NAME(intel_sdvo), cmd);
-	for (i = 0; i < args_len; i++)
-		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
-	for (; i < 8; i++)
-		DRM_LOG_KMS("   ");
+	for (i = 0; i < args_len; i++) {
+		BUF_PRINT("%02X ", ((u8 *)args)[i]);
+	}
+	for (; i < 8; i++) {
+		BUF_PRINT("   ");
+	}
 	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
 		if (cmd == sdvo_cmd_names[i].cmd) {
-			DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+			BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(sdvo_cmd_names))
-		DRM_LOG_KMS("(%02X)", cmd);
-	DRM_LOG_KMS("\n");
+	if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+		BUF_PRINT("(%02X)", cmd);
+	}
+	BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+	DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
 }

 static const char *cmd_status_names[] = {
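
BUF_PRINT() accumulates the whole dump in a local buffer so each SDVO transaction is logged with a single DRM_DEBUG_KMS() call instead of a line split across many DRM_LOG_KMS() calls. The same pattern in isolation, as a stand-alone userspace sketch (buffer size and sample values are arbitrary):

	#include <stdio.h>

	#define BUF_LEN 256
	/* Append at pos; clamp the size argument so snprintf never sees a negative value. */
	#define BUF_PRINT(...) \
		(pos += snprintf(buffer + pos, pos < BUF_LEN ? BUF_LEN - pos : 0, __VA_ARGS__))

	int main(void)
	{
		unsigned char args[] = { 0x0a, 0x01 };
		char buffer[BUF_LEN];
		int pos = 0;

		for (unsigned int i = 0; i < sizeof(args); i++)
			BUF_PRINT("%02X ", args[i]);
		BUF_PRINT("(%s)", "SDVO_CMD_EXAMPLE");

		printf("W: %s\n", buffer);	/* one log line, as DRM_DEBUG_KMS() gets in the driver */
		return 0;
	}
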
@@ -512,9 +523,10 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 {
 	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
 	u8 status;
-	int i;
+	int i, pos = 0;
+#define BUF_LEN 256
+	char buffer[BUF_LEN];
 
-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 
 	/*
 	 * The documentation states that all commands will be
@@ -551,10 +563,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 			goto log_fail;
 	}
 
+#define BUF_PRINT(args...) \
+	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+
 	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+		BUF_PRINT("(%s)", cmd_status_names[status]);
 	else
-		DRM_LOG_KMS("(??? %d)", status);
+		BUF_PRINT("(??? %d)", status);
 
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		goto log_fail;
@@ -565,13 +580,17 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 					  SDVO_I2C_RETURN_0 + i,
 					  &((u8 *)response)[i]))
 			goto log_fail;
-		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+		BUF_PRINT(" %02X", ((u8 *)response)[i]);
 	}
-	DRM_LOG_KMS("\n");
+	BUG_ON(pos >= BUF_LEN - 1);
+#undef BUF_PRINT
+#undef BUF_LEN
+
+	DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
 	return true;

 log_fail:
-	DRM_LOG_KMS("... failed\n");
+	DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
 	return false;
 }
 
@@ -1517,8 +1536,9 @@ static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
 	intel_modeset_check_state(connector->dev);
 }
 
-static int intel_sdvo_mode_valid(struct drm_connector *connector,
-				 struct drm_display_mode *mode)
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 

+ 18 - 11
drivers/gpu/drm/i915/intel_sideband.c

@@ -90,6 +90,22 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+
+	return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 {
 	u32 val = 0;
@@ -160,27 +176,18 @@ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 			PUNIT_OPCODE_REG_WRITE, reg, &val);
 }
 
-static u32 vlv_get_phy_port(enum pipe pipe)
-{
-	u32 port = IOSF_PORT_DPIO;
-
-	WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B));
-
-	return port;
-}
-
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 {
 	u32 val = 0;
 
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
 			DPIO_OPCODE_REG_READ, reg, &val);
 	return val;
 }

 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
 			DPIO_OPCODE_REG_WRITE, reg, &val);
 }
 

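The DPIO accessors now look the IOSF port up from the PHY that owns the pipe instead of hard-coding IOSF_PORT_DPIO, which leaves room for parts with more than one DPIO PHY. A plausible shape for the two helpers on single-PHY VLV (the exact mapping is an assumption and is not part of this diff):

	#define DPIO_PHY(pipe)			((pipe) == PIPE_C ? 1 : 0)
	#define DPIO_PHY_IOSF_PORT(phy)		(dev_priv->dpio_phy_iosf_port[phy])
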
+ 18 - 0
drivers/gpu/drm/i915/intel_sprite.c

@@ -104,6 +104,12 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 		break;
 	}
 
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via propertiesa.
+	 */
+	sprctl |= SP_GAMMA_ENABLE;
+
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SP_TILED;
 
@@ -257,6 +263,12 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		BUG();
 	}
 
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via propertiesa.
+	 */
+	sprctl |= SPRITE_GAMMA_ENABLE;
+
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SPRITE_TILED;
 
@@ -453,6 +465,12 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		BUG();
 	}
 
+	/*
+	 * Enable gamma to match primary/cursor plane behaviour.
+	 * FIXME should be user controllable via propertiesa.
+	 */
+	dvscntr |= DVS_GAMMA_ENABLE;
+
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dvscntr |= DVS_TILED;
 

+ 218 - 57
drivers/gpu/drm/i915/intel_uncore.c

@@ -64,7 +64,8 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
 	__raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
 			    FORCEWAKE_ACK_TIMEOUT_MS))
@@ -89,7 +90,8 @@ static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 	__raw_posting_read(dev_priv, ECOBUS);
 }
 
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	u32 forcewake_ack;
 
@@ -121,12 +123,12 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 	u32 gtfifodbg;

 	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
-	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
-	     "MMIO read or write has been dropped %x\n", gtfifodbg))
-		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
+		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
 	/* something from same cacheline, but !FORCEWAKE */
@@ -134,7 +136,8 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
+							int fw_engine)
 {
 	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
 			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
@@ -149,10 +152,10 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 
 	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
 		int loop = 500;
-		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
 			udelay(10);
-			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 		}
 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
 			++ret;
@@ -171,38 +174,112 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
 }
 
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
+						int fw_engine)
 {
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+	/* Check for Render Engine */
+	if (FORCEWAKE_RENDER & fw_engine) {
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_VLV) &
+						FORCEWAKE_KERNEL) == 0,
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_VLV) &
+						FORCEWAKE_KERNEL),
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: waiting for Render to ack.\n");
+	}
 
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
-			     FORCEWAKE_KERNEL),
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+	/* Check for Media Engine */
+	if (FORCEWAKE_MEDIA & fw_engine) {
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_MEDIA_VLV) &
+						FORCEWAKE_KERNEL) == 0,
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
+
+		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+		if (wait_for_atomic((__raw_i915_read32(dev_priv,
+						FORCEWAKE_ACK_MEDIA_VLV) &
+						FORCEWAKE_KERNEL),
+					FORCEWAKE_ACK_TIMEOUT_MS))
+			DRM_ERROR("Timed out: waiting for media to ack.\n");
+	}
 
 	/* WaRsForcewakeWaitTC0:vlv */
 	__gen6_gt_wait_for_thread_c0(dev_priv);
+
 }
 
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
+					int fw_engine)
 {
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+	/* Check for Render Engine */
+	if (FORCEWAKE_RENDER & fw_engine)
+		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
+
+	/* Check for Media Engine */
+	if (FORCEWAKE_MEDIA & fw_engine)
+		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+
 	/* The below doubles as a POSTING_READ */
 	gen6_gt_check_fifodbg(dev_priv);
+
+}
+
+void vlv_force_wake_get(struct drm_i915_private *dev_priv,
+						int fw_engine)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (FORCEWAKE_RENDER & fw_engine) {
+		if (dev_priv->uncore.fw_rendercount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							FORCEWAKE_RENDER);
+	}
+	if (FORCEWAKE_MEDIA & fw_engine) {
+		if (dev_priv->uncore.fw_mediacount++ == 0)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv,
+							FORCEWAKE_MEDIA);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv,
+						int fw_engine)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	if (FORCEWAKE_RENDER & fw_engine) {
+		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
+		if (--dev_priv->uncore.fw_rendercount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							FORCEWAKE_RENDER);
+	}
+
+	if (FORCEWAKE_MEDIA & fw_engine) {
+		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
+		if (--dev_priv->uncore.fw_mediacount == 0)
+			dev_priv->uncore.funcs.force_wake_put(dev_priv,
+							FORCEWAKE_MEDIA);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }

 static void gen6_force_wake_work(struct work_struct *work)
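
With separate render and media reference counts, a code path that only touches media-engine registers can keep just that well awake; a minimal usage sketch:

	vlv_force_wake_get(dev_priv, FORCEWAKE_MEDIA);
	/* ... MMIO accesses to media-engine registers ... */
	vlv_force_wake_put(dev_priv, FORCEWAKE_MEDIA);

Callers that do not care keep passing FORCEWAKE_ALL and get the old behaviour of waking both wells.
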
@@ -213,7 +290,7 @@ static void gen6_force_wake_work(struct work_struct *work)
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
@@ -281,29 +358,38 @@ void intel_uncore_sanitize(struct drm_device *dev)
  * be called at the beginning of the sequence followed by a call to
  * gen6_gt_force_wake_put() at the end of the sequence.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;

 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
+	/* Redirect to VLV specific routine */
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		return vlv_force_wake_get(dev_priv, fw_engine);
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }

 /*
  * see gen6_gt_force_wake_get()
  */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
 {
 	unsigned long irqflags;

 	if (!dev_priv->uncore.funcs.force_wake_put)
 		return;
 
+	/* Redirect to VLV specific routine */
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		return vlv_force_wake_put(dev_priv, fw_engine);
+
+
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 	if (--dev_priv->uncore.forcewake_count == 0) {
 		dev_priv->uncore.forcewake_count++;
@@ -379,16 +465,51 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
 	REG_READ_HEADER(x); \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+							FORCEWAKE_ALL); \
 		val = __raw_i915_read##x(dev_priv, reg); \
 		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+							FORCEWAKE_ALL); \
+	} else { \
+		val = __raw_i915_read##x(dev_priv, reg); \
+	} \
+	REG_READ_FOOTER; \
+}
+
+#define __vlv_read(x) \
+static u##x \
+vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	unsigned fwengine = 0; \
+	unsigned *fwcount; \
+	REG_READ_HEADER(x); \
+	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
+		fwengine = FORCEWAKE_RENDER;            \
+		fwcount = &dev_priv->uncore.fw_rendercount;    \
+	}                                               \
+	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) {       \
+		fwengine = FORCEWAKE_MEDIA;             \
+		fwcount = &dev_priv->uncore.fw_mediacount;     \
+	}  \
+	if (fwengine != 0) {		\
+		if ((*fwcount)++ == 0) \
+			(dev_priv)->uncore.funcs.force_wake_get(dev_priv, \
+								fwengine); \
+		val = __raw_i915_read##x(dev_priv, reg); \
+		if (--(*fwcount) == 0) \
+			(dev_priv)->uncore.funcs.force_wake_put(dev_priv, \
+							fwengine); \
 	} else { \
 		val = __raw_i915_read##x(dev_priv, reg); \
 	} \
 	REG_READ_FOOTER; \
 }
 
+
+__vlv_read(8)
+__vlv_read(16)
+__vlv_read(32)
+__vlv_read(64)
 __gen6_read(8)
 __gen6_read(16)
 __gen6_read(32)
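
__vlv_read() picks the well to wake from the register offset alone, via FORCEWAKE_VLV_RENDER_RANGE_OFFSET()/FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(). Those are simple offset-range tests; an illustrative shape (the ranges below are placeholders, the real ones are defined elsewhere in this series):

	#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
		(((reg) >= 0x2000 && (reg) < 0x4000) || ((reg) >= 0x5000 && (reg) < 0x8000))
	#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
		(((reg) >= 0x12000 && (reg) < 0x14000) || ((reg) >= 0x22000 && (reg) < 0x24000))

Registers outside either range fall through to a plain __raw_i915_read without touching forcewake at all.
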
@@ -402,6 +523,7 @@ __gen4_read(16)
 __gen4_read(32)
 __gen4_read(64)
 
+#undef __vlv_read
 #undef __gen6_read
 #undef __gen5_read
 #undef __gen4_read
@@ -489,11 +611,13 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
 	bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
 	REG_WRITE_HEADER; \
 	if (__needs_put) { \
-		dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
+							FORCEWAKE_ALL); \
 	} \
 	__raw_i915_write##x(dev_priv, reg, val); \
 	if (__needs_put) { \
-		dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
+							FORCEWAKE_ALL); \
 	} \
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 }
@@ -534,8 +658,8 @@ void intel_uncore_init(struct drm_device *dev)
 			  gen6_force_wake_work);

 	if (IS_VALLEYVIEW(dev)) {
-		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
-		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
+		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
 	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
@@ -552,9 +676,9 @@ void intel_uncore_init(struct drm_device *dev)
 		 * forcewake being disabled.
 		 */
 		mutex_lock(&dev->struct_mutex);
-		__gen6_gt_force_wake_mt_get(dev_priv);
+		__gen6_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-		__gen6_gt_force_wake_mt_put(dev_priv);
+		__gen6_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);

 		if (ecobus & FORCEWAKE_MT_ENABLE) {
@@ -601,10 +725,18 @@ void intel_uncore_init(struct drm_device *dev)
 			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
 			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
 		}
-		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
-		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
-		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
-		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+
+		if (IS_VALLEYVIEW(dev)) {
+			dev_priv->uncore.funcs.mmio_readb  = vlv_read8;
+			dev_priv->uncore.funcs.mmio_readw  = vlv_read16;
+			dev_priv->uncore.funcs.mmio_readl  = vlv_read32;
+			dev_priv->uncore.funcs.mmio_readq  = vlv_read64;
+		} else {
+			dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+			dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+			dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+			dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		}
 		break;
 	case 5:
 		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
@@ -687,6 +819,43 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	return 0;
 }
 
+int i915_get_reset_stats_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reset_stats *args = data;
+	struct i915_ctx_hang_stats *hs;
+	int ret;
+
+	if (args->flags || args->pad)
+		return -EINVAL;
+
+	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id);
+	if (IS_ERR(hs)) {
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR(hs);
+	}
+
+	if (capable(CAP_SYS_ADMIN))
+		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	else
+		args->reset_count = 0;
+
+	args->batch_active = hs->batch_active;
+	args->batch_pending = hs->batch_pending;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i965_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
@@ -770,12 +939,12 @@ static int gen6_do_reset(struct drm_device *dev)
 
 	/* If reset with a user forcewake, try to restore, otherwise turn it off */
 	if (dev_priv->uncore.forcewake_count)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
 	else
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
 
 	/* Restore fifo count */
-	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 	return ret;
@@ -784,6 +953,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
@@ -792,15 +962,6 @@ int intel_gpu_reset(struct drm_device *dev)
 	}
 }
 
-void intel_uncore_clear_errors(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* XXX needs spinlock around caller's grouping */
-	if (HAS_FPGA_DBG_UNCLAIMED(dev))
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
 void intel_uncore_check_errors(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;

+ 19 - 0
include/uapi/drm/i915_drm.h

@@ -222,6 +222,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_SET_CACHING	0x2f
 #define DRM_I915_GEM_GET_CACHING	0x30
 #define DRM_I915_REG_READ		0x31
+#define DRM_I915_GET_RESET_STATS	0x32
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -271,6 +272,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1030,4 +1032,21 @@ struct drm_i915_reg_read {
 	__u64 offset;
 	__u64 val; /* Return value */
 };
+
+struct drm_i915_reset_stats {
+	__u32 ctx_id;
+	__u32 flags;
+
+	/* All resets since boot/module reload, for all contexts */
+	__u32 reset_count;
+
+	/* Number of batches lost when active in GPU, for this context */
+	__u32 batch_active;
+
+	/* Number of batches lost pending for execution, for this context */
+	__u32 batch_pending;
+
+	__u32 pad;
+};
+
 #endif /* _UAPI_I915_DRM_H_ */
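
A minimal userspace sketch of exercising the new ioctl through libdrm (assumes fd is an open DRM device and ctx_id came from DRM_IOCTL_I915_GEM_CONTEXT_CREATE; ctx_id 0 queries the default context and, per the kernel side above, then needs CAP_SYS_ADMIN):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <i915_drm.h>	/* libdrm copy of this header */

	static int query_reset_stats(int fd, __u32 ctx_id)
	{
		struct drm_i915_reset_stats stats;

		memset(&stats, 0, sizeof(stats));	/* flags and pad must be zero */
		stats.ctx_id = ctx_id;

		if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
			return -errno;

		/* batch_active/batch_pending are per context; reset_count is global
		 * and reported as 0 for unprivileged callers. */
		printf("resets=%u active=%u pending=%u\n",
		       stats.reset_count, stats.batch_active, stats.batch_pending);
		return 0;
	}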