Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is a combo of -next and some -fixes that came in in the
  intervening time.

  Highlights:

  New drivers:
    ARM Armada driver for Marvell Armada 510 SoCs

  Intel:
    initial Broadwell support under a default-off switch
    Stereo/3D HDMI mode support
    Valleyview improvements
    DisplayPort improvements
    Haswell fixes
    initial MIPI DSI panel support
    CRC support for debugging
    build with CONFIG_FB=n

  Radeon:
    enable DPM on a number of GPUs by default
    secondary GPU powerdown support
    enable HDMI audio by default
    Hawaii support

  Nouveau:
    dynamic pm code infrastructure reworked, does nothing major yet
    GK208 modesetting support
    MSI fixes, on by default again
    PMPEG improvements
    pageflipping fixes

  GMA500:
    MinnowBoard SDVO support

  VMware:
    misc fixes

  MSM:
    PRIME, plane and render-node support

  Tegra:
    rearchitected to put the drm driver into the drm subsystem.
    HDMI and gr2d support for the Tegra114 SoC

  QXL:
    oops fix and multi-head fixes

  DRM core:
    sysfs lifetime fixes
    client capability ioctl
    further cleanups to device midlayer
    more vblank timestamp fixes"

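The "client capability ioctl" in the core highlights is how userspace opts in
to new behaviour such as the stereo/3D HDMI modes.  A minimal sketch of a
client enabling stereo modes (not part of this merge's diff; it assumes the
libdrm drmIoctl() wrapper and the DRM_CLIENT_CAP_STEREO_3D uapi added this
cycle, and header paths vary by installation):

	#include <xf86drm.h>	/* libdrm ioctl wrapper */
	#include <drm/drm.h>	/* uapi: struct drm_set_client_cap */

	/* Until a client sets this capability, the kernel filters
	 * stereo 3D modes out of the connector mode lists. */
	static int enable_stereo(int drm_fd)
	{
		struct drm_set_client_cap cap = {
			.capability = DRM_CLIENT_CAP_STEREO_3D,
			.value = 1,
		};

		return drmIoctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
	}
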
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (789 commits)
  drm/nouveau: do not map evicted vram buffers in nouveau_bo_vma_add
  drm/nvc0-/gr: shift wrapping bug in nvc0_grctx_generate_r406800
  drm/nouveau/pwr: fix missing mutex unlock in a failure path
  drm/nv40/therm: fix slowing down fan when pstate undefined
  drm/nv11-: synchronise flips to vblank, unless async flip requested
  drm/nvc0-: remove nasty fifo swmthd hack for flip completion method
  drm/nv10-: we no longer need to create nvsw object on user channels
  drm/nouveau: always queue flips relative to kernel channel activity
  drm/nouveau: there is no need to reserve/fence the new fb when flipping
  drm/nouveau: when bailing out of a pushbuf ioctl, do not remove previous fence
  drm/nouveau: allow nouveau_fence_ref() to be a noop
  drm/nvc8/mc: msi rearm is via the nvc0 method
  drm/ttm: Fix vma page_prot bit manipulation
  drm/vmwgfx: Fix a couple of compile / sparse warnings and errors
  drm/vmwgfx: Resource evict fixes
  drm/edid: compare actual vrefresh for all modes for quirks
  drm: shmob_drm: Convert to clk_prepare/unprepare
  drm/nouveau: fix 32-bit build
  drm/i915/opregion: fix build error on CONFIG_ACPI=n
  Revert "drm/radeon/audio: don't set speaker allocation on DCE4+"
  ...
Author: Linus Torvalds
Commit: 049ffa8ab3
100 changed files with 10461 additions and 2503 deletions
  1. MAINTAINERS (+2, -0)
  2. arch/x86/kernel/early-quirks.c (+12, -0)
  3. drivers/gpu/drm/Kconfig (+14, -59)
  4. drivers/gpu/drm/Makefile (+4, -1)
  5. drivers/gpu/drm/armada/Kconfig (+24, -0)
  6. drivers/gpu/drm/armada/Makefile (+7, -0)
  7. drivers/gpu/drm/armada/armada_510.c (+87, -0)
  8. drivers/gpu/drm/armada/armada_crtc.c (+1098, -0)
  9. drivers/gpu/drm/armada/armada_crtc.h (+83, -0)
  10. drivers/gpu/drm/armada/armada_debugfs.c (+177, -0)
  11. drivers/gpu/drm/armada/armada_drm.h (+113, -0)
  12. drivers/gpu/drm/armada/armada_drv.c (+421, -0)
  13. drivers/gpu/drm/armada/armada_fb.c (+170, -0)
  14. drivers/gpu/drm/armada/armada_fb.h (+24, -0)
  15. drivers/gpu/drm/armada/armada_fbdev.c (+202, -0)
  16. drivers/gpu/drm/armada/armada_gem.c (+611, -0)
  17. drivers/gpu/drm/armada/armada_gem.h (+52, -0)
  18. drivers/gpu/drm/armada/armada_hw.h (+318, -0)
  19. drivers/gpu/drm/armada/armada_ioctlP.h (+18, -0)
  20. drivers/gpu/drm/armada/armada_output.c (+158, -0)
  21. drivers/gpu/drm/armada/armada_output.h (+39, -0)
  22. drivers/gpu/drm/armada/armada_overlay.c (+477, -0)
  23. drivers/gpu/drm/armada/armada_slave.c (+139, -0)
  24. drivers/gpu/drm/armada/armada_slave.h (+26, -0)
  25. drivers/gpu/drm/ast/Kconfig (+1, -0)
  26. drivers/gpu/drm/ast/ast_drv.c (+0, -1)
  27. drivers/gpu/drm/ast/ast_drv.h (+0, -1)
  28. drivers/gpu/drm/ast/ast_main.c (+0, -6)
  29. drivers/gpu/drm/cirrus/Kconfig (+1, -0)
  30. drivers/gpu/drm/cirrus/cirrus_drv.c (+0, -1)
  31. drivers/gpu/drm/cirrus/cirrus_drv.h (+0, -1)
  32. drivers/gpu/drm/cirrus/cirrus_main.c (+0, -6)
  33. drivers/gpu/drm/cirrus/cirrus_mode.c (+5, -6)
  34. drivers/gpu/drm/drm_context.c (+0, -2)
  35. drivers/gpu/drm/drm_crtc.c (+98, -55)
  36. drivers/gpu/drm/drm_crtc_helper.c (+49, -47)
  37. drivers/gpu/drm/drm_debugfs.c (+3, -3)
  38. drivers/gpu/drm/drm_dp_helper.c (+8, -8)
  39. drivers/gpu/drm/drm_drv.c (+2, -72)
  40. drivers/gpu/drm/drm_edid.c (+281, -33)
  41. drivers/gpu/drm/drm_edid_load.c (+54, -54)
  42. drivers/gpu/drm/drm_fb_helper.c (+2, -15)
  43. drivers/gpu/drm/drm_fops.c (+71, -6)
  44. drivers/gpu/drm/drm_gem.c (+0, -29)
  45. drivers/gpu/drm/drm_global.c (+0, -2)
  46. drivers/gpu/drm/drm_info.c (+3, -3)
  47. drivers/gpu/drm/drm_ioctl.c (+21, -0)
  48. drivers/gpu/drm/drm_irq.c (+66, -111)
  49. drivers/gpu/drm/drm_lock.c (+0, -3)
  50. drivers/gpu/drm/drm_modes.c (+35, -8)
  51. drivers/gpu/drm/drm_pci.c (+11, -58)
  52. drivers/gpu/drm/drm_platform.c (+5, -54)
  53. drivers/gpu/drm/drm_prime.c (+1, -2)
  54. drivers/gpu/drm/drm_stub.c (+242, -120)
  55. drivers/gpu/drm/drm_sysfs.c (+31, -65)
  56. drivers/gpu/drm/drm_usb.c (+6, -51)
  57. drivers/gpu/drm/drm_vm.c (+1, -1)
  58. drivers/gpu/drm/exynos/Kconfig (+1, -0)
  59. drivers/gpu/drm/exynos/exynos_drm_drv.c (+0, -1)
  60. drivers/gpu/drm/exynos/exynos_drm_fimd.c (+4, -4)
  61. drivers/gpu/drm/exynos/exynos_drm_gem.c (+0, -5)
  62. drivers/gpu/drm/exynos/exynos_drm_gem.h (+0, -3)
  63. drivers/gpu/drm/exynos/exynos_drm_vidi.c (+6, -10)
  64. drivers/gpu/drm/gma500/Kconfig (+1, -0)
  65. drivers/gpu/drm/gma500/cdv_device.c (+1, -0)
  66. drivers/gpu/drm/gma500/cdv_intel_dp.c (+1, -1)
  67. drivers/gpu/drm/gma500/framebuffer.c (+1, -1)
  68. drivers/gpu/drm/gma500/gem.c (+0, -5)
  69. drivers/gpu/drm/gma500/intel_gmbus.c (+49, -41)
  70. drivers/gpu/drm/gma500/oaktrail_crtc.c (+269, -164)
  71. drivers/gpu/drm/gma500/oaktrail_device.c (+6, -0)
  72. drivers/gpu/drm/gma500/oaktrail_lvds.c (+3, -27)
  73. drivers/gpu/drm/gma500/psb_device.c (+1, -0)
  74. drivers/gpu/drm/gma500/psb_drv.c (+34, -5)
  75. drivers/gpu/drm/gma500/psb_drv.h (+52, -6)
  76. drivers/gpu/drm/gma500/psb_intel_display.c (+1, -1)
  77. drivers/gpu/drm/gma500/psb_intel_sdvo.c (+38, -21)
  78. drivers/gpu/drm/gma500/psb_irq.c (+11, -11)
  79. drivers/gpu/drm/i2c/tda998x_drv.c (+3, -0)
  80. drivers/gpu/drm/i810/i810_dma.c (+0, -11)
  81. drivers/gpu/drm/i915/Kconfig (+67, -0)
  82. drivers/gpu/drm/i915/Makefile (+5, -1)
  83. drivers/gpu/drm/i915/dvo.h (+0, -11)
  84. drivers/gpu/drm/i915/i915_debugfs.c (+1131, -286)
  85. drivers/gpu/drm/i915/i915_dma.c (+59, -59)
  86. drivers/gpu/drm/i915/i915_drv.c (+102, -85)
  87. drivers/gpu/drm/i915/i915_drv.h (+296, -141)
  88. drivers/gpu/drm/i915/i915_gem.c (+340, -218)
  89. drivers/gpu/drm/i915/i915_gem_context.c (+40, -24)
  90. drivers/gpu/drm/i915/i915_gem_evict.c (+43, -7)
  91. drivers/gpu/drm/i915/i915_gem_execbuffer.c (+243, -158)
  92. drivers/gpu/drm/i915/i915_gem_gtt.c (+488, -20)
  93. drivers/gpu/drm/i915/i915_gem_stolen.c (+1, -1)
  94. drivers/gpu/drm/i915/i915_gem_tiling.c (+4, -4)
  95. drivers/gpu/drm/i915/i915_gpu_error.c (+40, -6)
  96. drivers/gpu/drm/i915/i915_irq.c (+859, -184)
  97. drivers/gpu/drm/i915/i915_reg.h (+783, -44)
  98. drivers/gpu/drm/i915/i915_suspend.c (+40, -4)
  99. drivers/gpu/drm/i915/i915_sysfs.c (+106, -46)
  100. drivers/gpu/drm/i915/i915_trace.h (+59, -3)

+ 2 - 0
MAINTAINERS

@@ -2849,7 +2849,9 @@ L:	dri-devel@lists.freedesktop.org
 L:	linux-tegra@vger.kernel.org
 T:	git git://anongit.freedesktop.org/tegra/linux.git
 S:	Supported
+F:	drivers/gpu/drm/tegra/
 F:	drivers/gpu/host1x/
+F:	include/linux/host1x.h
 F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 

+ 12 - 0
arch/x86/kernel/early-quirks.c

@@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
 	return gmch_ctrl << 25; /* 32 MB units */
 }
 
+static inline size_t gen8_stolen_size(int num, int slot, int func)
+{
+	u16 gmch_ctrl;
+
+	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
+	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
+	gmch_ctrl &= BDW_GMCH_GMS_MASK;
+	return gmch_ctrl << 25; /* 32 MB units */
+}
+
 typedef size_t (*stolen_size_fn)(int num, int slot, int func);
 
 static struct pci_device_id intel_stolen_ids[] __initdata = {
@@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
 	INTEL_IVB_D_IDS(gen6_stolen_size),
 	INTEL_HSW_D_IDS(gen6_stolen_size),
 	INTEL_HSW_M_IDS(gen6_stolen_size),
+	INTEL_BDW_M_IDS(gen8_stolen_size),
+	INTEL_BDW_D_IDS(gen8_stolen_size)
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)

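For context on the gen8 decode above: Broadwell moves the graphics mode
select (GMS) field within the GMCH control word but keeps the 32 MB
granularity.  A hypothetical standalone rendering of the arithmetic,
assuming BDW_GMCH_GMS_SHIFT is 8 and BDW_GMCH_GMS_MASK is 0xff as in the
i915 headers this series touches:

	#include <stdio.h>

	int main(void)
	{
		unsigned short gmch_ctrl = 0x0310;	/* sample GMCH control word */
		unsigned long gms = (gmch_ctrl >> 8) & 0xff;	/* GMS field = 3 */

		/* each GMS unit is 32 MB, i.e. 1 << 25 bytes */
		printf("stolen memory: %lu MB\n", (gms << 25) >> 20);	/* prints 96 */
		return 0;
	}
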
+ 14 - 59
drivers/gpu/drm/Kconfig

@@ -29,11 +29,17 @@ config DRM_USB
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	help
+	  CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+	bool
+	depends on DRM_KMS_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	help
-	  FB and CRTC helpers for KMS drivers.
+	  FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
 config DRM_KMS_CMA_HELPER
 	bool
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
 	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
         select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
 	  selected, the module will be called i810.  AGP support is required
 	  for this driver to work.
 
-config DRM_I915
-	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
-	# we need shmfs for the swappable backing store, and in particular
-	# the shmem_readpage() which depends upon tmpfs
-	select SHMEM
-	select TMPFS
-	select DRM_KMS_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	# i915 depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select BACKLIGHT_LCD_SUPPORT if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select INPUT if ACPI
-	select THERMAL if ACPI
-	select ACPI_VIDEO if ACPI
-	select ACPI_BUTTON if ACPI
-	help
-	  Choose this option if you have a system that has "Intel Graphics
-	  Media Accelerator" or "HD Graphics" integrated graphics,
-	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915.  AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
-
-	  Note that the older i810/i815 chipsets require the use of the
-	  i810 driver instead, and the Atom z5xx series has an entirely
-	  different implementation.
-
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.  Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-	bool "Enable preliminary support for prerelease Intel hardware by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you have prerelease Intel hardware and want the
-	  i915 driver to support it by default.  You can enable such support at
-	  runtime with the module option i915.preliminary_hw_support=1; this
-	  option changes the default for that module option.
-
-	  If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/armada/Kconfig"
+
 source "drivers/gpu/drm/rcar-du/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
 source "drivers/gpu/drm/qxl/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"

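The net effect of the Kconfig changes above is that DRM_KMS_HELPER no longer
drags in the fbdev stack: the FB and FRAMEBUFFER_CONSOLE selects move to the
new DRM_KMS_FB_HELPER symbol, so a pure KMS driver can now build with
CONFIG_FB=n (one of the i915 highlights above), while drivers that provide
fbdev emulation, as radeon does in the hunk shown, select DRM_KMS_FB_HELPER
explicitly.  The i915 options themselves move verbatim into the newly
sourced drivers/gpu/drm/i915/Kconfig.
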
+ 4 - 1
drivers/gpu/drm/Makefile

@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y   := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y			+= i2c/

+ 24 - 0
drivers/gpu/drm/armada/Kconfig

@@ -0,0 +1,24 @@
+config DRM_ARMADA
+	tristate "DRM support for Marvell Armada SoCs"
+	depends on DRM && HAVE_CLK && ARM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select DRM_KMS_HELPER
+	help
+	  Support the "LCD" controllers found on the Marvell Armada 510
+	  devices.  There are two controllers on the device, each controller
+	  supports graphics and video overlays.
+
+	  This driver provides no built-in acceleration; acceleration is
+	  performed by other IP found on the SoC.  This driver provides
+	  kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+	bool "Support TDA1998X HDMI output"
+	depends on DRM_ARMADA != n
+	depends on I2C && DRM_I2C_NXP_TDA998X = y
+	default y
+	help
+	  Support the TDA1998x HDMI output device found on the Solid-Run
+	  CuBox.

+ 7 - 0
drivers/gpu/drm/armada/Makefile

@@ -0,0 +1,7 @@
+armada-y	:= armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+		   armada_gem.o armada_output.o armada_overlay.o \
+		   armada_slave.o
+armada-y	+= armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o

+ 87 - 0
drivers/gpu/drm/armada/armada_510.c

@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+	return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+	/* Lower the watermark so to eliminate jitter at higher bandwidths */
+	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+	return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that.  The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We currently are pretty rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and erroring LCD1.  This needs improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+	const struct drm_display_mode *mode, uint32_t *sclk)
+{
+	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+	struct clk *clk = priv->extclk[0];
+	int ret;
+
+	if (dcrtc->num == 1)
+		return -EINVAL;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	if (dcrtc->clk != clk) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			return ret;
+		dcrtc->clk = clk;
+	}
+
+	if (sclk) {
+		uint32_t rate, ref, div;
+
+		rate = mode->clock * 1000;
+		ref = clk_round_rate(clk, rate);
+		div = DIV_ROUND_UP(ref, rate);
+		if (div < 1)
+			div = 1;
+
+		clk_set_rate(clk, ref);
+		*sclk = div | SCLK_510_EXTCLK1;
+	}
+
+	return 0;
+}
+
+const struct armada_variant armada510_ops = {
+	.has_spu_adv_reg = true,
+	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+	.init = armada510_init,
+	.crtc_init = armada510_crtc_init,
+	.crtc_compute_clock = armada510_crtc_compute_clock,
+};

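A worked example of the divider computation in armada510_crtc_compute_clock()
above: a 1080p60 mode has mode->clock == 148500 (kHz), so the target rate is
148,500,000 Hz.  If clk_round_rate() reports that EXT_REF_CLK_1 can run at
exactly that rate, DIV_ROUND_UP(148500000, 148500000) == 1 and the register
value becomes 1 | SCLK_510_EXTCLK1; if the clock could only round up to,
say, 297 MHz, the divider would instead be 2.
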
+ 1098 - 0
drivers/gpu/drm/armada/armada_crtc.c

@@ -0,0 +1,1098 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+	struct drm_pending_vblank_event *event;
+	struct armada_regs regs[4];
+	struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+	CSC_AUTO = 0,
+	CSC_YUV_CCIR601 = 1,
+	CSC_YUV_CCIR709 = 2,
+	CSC_RGB_COMPUTER = 1,
+	CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing.  Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640  1080 1084 1094 1125
+ * Which get translated to:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640   540  542  547  562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC.  Total clocks per
+ * line: 2640.  The odd frame, the first active line is at line 21, and
+ * the even frame, the first active line is 584.
+ *
+ * LN:    560     561     562     563             567     568    569
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ *  22 blanking lines.  VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN:    1123   1124    1125      1               5       6      7
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ *  23 blanking lines
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ *  Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ *  Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ *   vtotal = mode->crtc_vtotal + 1;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ *   vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ *   vtotal = mode->crtc_vtotal;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ *   vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
+
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+	while (regs->offset != ~0) {
+		void __iomem *reg = dcrtc->base + regs->offset;
+		uint32_t val;
+
+		val = regs->mask;
+		if (val != 0)
+			val &= readl_relaxed(reg);
+		writel_relaxed(val | regs->val, reg);
+		++regs;
+	}
+}
+
+#define dpms_blanked(dpms)	((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+	uint32_t dumb_ctrl;
+
+	dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+	if (!dpms_blanked(dcrtc->dpms))
+		dumb_ctrl |= CFG_DUMB_ENA;
+
+	/*
+	 * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+	 * be using SPI or GPIO.  If we set this to DUMB_BLANK, we will
+	 * force LCD_D[23:0] to output blank color, overriding the GPIO or
+	 * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
+	 */
+	if (dpms_blanked(dcrtc->dpms) &&
+	    (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+		dumb_ctrl &= ~DUMB_MASK;
+		dumb_ctrl |= DUMB_BLANK;
+	}
+
+	/*
+	 * The documentation doesn't indicate what the normal state of
+	 * the sync signals are.  Sebastian Hesselbart kindly probed
+	 * these signals on his board to determine their state.
+	 *
+	 * The non-inverted state of the sync signals is active high.
+	 * Setting these bits makes the appropriate signal active low.
+	 */
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+		dumb_ctrl |= CFG_INV_CSYNC;
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+		dumb_ctrl |= CFG_INV_HSYNC;
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+		dumb_ctrl |= CFG_INV_VSYNC;
+
+	if (dcrtc->dumb_ctrl != dumb_ctrl) {
+		dcrtc->dumb_ctrl = dumb_ctrl;
+		writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+	}
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+	int x, int y, struct armada_regs *regs, bool interlaced)
+{
+	struct armada_gem_object *obj = drm_fb_obj(fb);
+	unsigned pitch = fb->pitches[0];
+	unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+	uint32_t addr_odd, addr_even;
+	unsigned i = 0;
+
+	DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+		pitch, x, y, fb->bits_per_pixel);
+
+	addr_odd = addr_even = obj->dev_addr + offset;
+
+	if (interlaced) {
+		addr_even += pitch;
+		pitch *= 2;
+	}
+
+	/* write offset, base, and pitch */
+	armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+	armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+	armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+	return i;
+}
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+	struct armada_frame_work *work)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+	unsigned long flags;
+	int ret;
+
+	ret = drm_vblank_get(dev, dcrtc->num);
+	if (ret) {
+		DRM_ERROR("failed to acquire vblank counter\n");
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (!dcrtc->frame_work)
+		dcrtc->frame_work = work;
+	else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (ret)
+		drm_vblank_put(dev, dcrtc->num);
+
+	return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+	struct armada_frame_work *work = dcrtc->frame_work;
+
+	dcrtc->frame_work = NULL;
+
+	armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+	if (work->event)
+		drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+	drm_vblank_put(dev, dcrtc->num);
+
+	/* Finally, queue the process-half of the cleanup. */
+	__armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+	kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+	struct drm_framebuffer *fb, bool force)
+{
+	struct armada_frame_work *work;
+
+	if (!fb)
+		return;
+
+	if (force) {
+		/* Display is disabled, so just drop the old fb */
+		drm_framebuffer_unreference(fb);
+		return;
+	}
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (work) {
+		int i = 0;
+		work->event = NULL;
+		work->old_fb = fb;
+		armada_reg_queue_end(work->regs, i);
+
+		if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+			return;
+
+		kfree(work);
+	}
+
+	/*
+	 * Oops - just drop the reference immediately and hope for
+	 * the best.  The worst that will happen is the buffer gets
+	 * reused before it has finished being displayed.
+	 */
+	drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+
+	/*
+	 * Tell the DRM core that vblank IRQs aren't going to happen for
+	 * a while.  This cleans up any pending vblank events for us.
+	 */
+	drm_vblank_off(dev, dcrtc->num);
+
+	/* Handle any pending flip event. */
+	spin_lock_irq(&dev->event_lock);
+	if (dcrtc->frame_work)
+		armada_drm_crtc_complete_frame_work(dcrtc);
+	spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+	int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+	int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (dpms_blanked(dpms))
+			armada_drm_vblank_off(dcrtc);
+	}
+}
+
+/*
+ * Prepare for a mode set.  Turn off overlay to ensure that we don't end
+ * up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct drm_plane *plane;
+
+	/*
+	 * If we have an overlay plane associated with this CRTC, disable
+	 * it before the modeset to avoid its coordinates being outside
+	 * the new mode parameters.  DRM doesn't provide help with this.
+	 */
+	plane = dcrtc->plane;
+	if (plane) {
+		struct drm_framebuffer *fb = plane->fb;
+
+		plane->funcs->disable_plane(plane);
+		plane->fb = NULL;
+		plane->crtc = NULL;
+		drm_framebuffer_unreference(fb);
+	}
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+		dcrtc->dpms = DRM_MODE_DPMS_ON;
+		armada_drm_crtc_update(dcrtc);
+	}
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+	const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	int ret;
+
+	/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+	if (!priv->variant->has_spu_adv_reg &&
+	    adj->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	/* Check whether the display mode is possible */
+	ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+	struct armada_vbl_event *e, *n;
+	void __iomem *base = dcrtc->base;
+
+	if (stat & DMA_FF_UNDERFLOW)
+		DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+	if (stat & GRA_FF_UNDERFLOW)
+		DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+	if (stat & VSYNC_IRQ)
+		drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+	spin_lock(&dcrtc->irq_lock);
+
+	list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+		list_del_init(&e->node);
+		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		e->fn(dcrtc, e->data);
+	}
+
+	if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+		int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+		uint32_t val;
+
+		writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+		writel_relaxed(dcrtc->v[i].spu_v_h_total,
+			       base + LCD_SPUT_V_H_TOTAL);
+
+		val = readl_relaxed(base + LCD_SPU_ADV_REG);
+		val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+		val |= dcrtc->v[i].spu_adv_reg;
+		writel_relaxed(val, base + LCD_SPU_ADV_REG);
+	}
+
+	if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+		writel_relaxed(dcrtc->cursor_hw_pos,
+			       base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+		writel_relaxed(dcrtc->cursor_hw_sz,
+			       base + LCD_SPU_HWC_HPXL_VLN);
+		armada_updatel(CFG_HWC_ENA,
+			       CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+			       base + LCD_SPU_DMA_CTRL0);
+		dcrtc->cursor_update = false;
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+	}
+
+	spin_unlock(&dcrtc->irq_lock);
+
+	if (stat & GRA_FRAME_IRQ) {
+		struct drm_device *dev = dcrtc->crtc.dev;
+
+		spin_lock(&dev->event_lock);
+		if (dcrtc->frame_work)
+			armada_drm_crtc_complete_frame_work(dcrtc);
+		spin_unlock(&dev->event_lock);
+
+		wake_up(&dcrtc->frame_wait);
+	}
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+	if (dcrtc->irq_ena & mask) {
+		dcrtc->irq_ena &= ~mask;
+		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	}
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+	if ((dcrtc->irq_ena & mask) != mask) {
+		dcrtc->irq_ena |= mask;
+		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+		if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+			writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+	}
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+	struct drm_display_mode *adj = &dcrtc->crtc.mode;
+	uint32_t val = 0;
+
+	if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+		val |= CFG_CSC_YUV_CCIR709;
+	if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+		val |= CFG_CSC_RGB_STUDIO;
+
+	/*
+	 * In auto mode, set the colorimetry, based upon the HDMI spec.
+	 * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+	 * ITU601.  It may be more appropriate to set this depending on
+	 * the source - but what if the graphic frame is YUV and the
+	 * video frame is RGB?
+	 */
+	if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+	     !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+	    (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+		if (dcrtc->csc_yuv_mode == CSC_AUTO)
+			val |= CFG_CSC_YUV_CCIR709;
+	}
+
+	/*
+	 * We assume we're connected to a TV-like device, so the YUV->RGB
+	 * conversion should produce a limited range.  We should set this
+	 * depending on the connectors attached to this CRTC, and what
+	 * kind of device they report being connected.
+	 */
+	if (dcrtc->csc_rgb_mode == CSC_AUTO)
+		val |= CFG_CSC_RGB_STUDIO;
+
+	return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+	struct drm_display_mode *mode, struct drm_display_mode *adj,
+	int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_regs regs[17];
+	uint32_t lm, rm, tm, bm, val, sclk;
+	unsigned long flags;
+	unsigned i;
+	bool interlaced;
+
+	drm_framebuffer_reference(crtc->fb);
+
+	interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+	i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+
+	rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+	lm = adj->crtc_htotal - adj->crtc_hsync_end;
+	bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+	tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+	DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+		adj->crtc_hdisplay,
+		adj->crtc_hsync_start,
+		adj->crtc_hsync_end,
+		adj->crtc_htotal, lm, rm);
+	DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+		adj->crtc_vdisplay,
+		adj->crtc_vsync_start,
+		adj->crtc_vsync_end,
+		adj->crtc_vtotal, tm, bm);
+
+	/* Wait for pending flips to complete */
+	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+	drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+	crtc->mode = *adj;
+
+	val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+	if (val != dcrtc->dumb_ctrl) {
+		dcrtc->dumb_ctrl = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+	}
+
+	/* Now compute the divider for real */
+	priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+	/* Ensure graphic fifo is enabled */
+	armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+	armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
+	if (interlaced ^ dcrtc->interlaced) {
+		if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+			drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+		else
+			drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		dcrtc->interlaced = interlaced;
+	}
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+	/* Even interlaced/progressive frame */
+	dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+				    adj->crtc_htotal;
+	dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+	val = adj->crtc_hsync_start;
+	dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+		priv->variant->spu_adv_reg;
+
+	if (interlaced) {
+		/* Odd interlaced frame */
+		dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+						(1 << 16);
+		dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+		val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+		dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+			priv->variant->spu_adv_reg;
+	} else {
+		dcrtc->v[0] = dcrtc->v[1];
+	}
+
+	val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+	armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+	armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+			   LCD_SPUT_V_H_TOTAL);
+
+	if (priv->variant->has_spu_adv_reg) {
+		armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+				     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+				     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+	}
+
+	val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+	val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
+	val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+
+	if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+		val |= CFG_PALETTE_ENA;
+
+	if (interlaced)
+		val |= CFG_GRA_FTOGGLE;
+
+	armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+			     CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+					 CFG_SWAPYU | CFG_YUV2RGB) |
+			     CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+			     LCD_SPU_DMA_CTRL0);
+
+	val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+	armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+	val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+	armada_reg_queue_end(regs, i);
+
+	armada_drm_crtc_update_regs(dcrtc, regs);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+	armada_drm_crtc_update(dcrtc);
+
+	drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+	return 0;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+	struct drm_framebuffer *old_fb)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_regs regs[4];
+	unsigned i;
+
+	i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+				    dcrtc->interlaced);
+	armada_reg_queue_end(regs, i);
+
+	/* Wait for pending flips to complete */
+	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+	/* Take a reference to the new fb as we're using it */
+	drm_framebuffer_reference(crtc->fb);
+
+	/* Update the base in the CRTC */
+	armada_drm_crtc_update_regs(dcrtc, regs);
+
+	/* Drop our previously held reference */
+	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+	return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+
+	/* Power down most RAMs and FIFOs */
+	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+	.dpms		= armada_drm_crtc_dpms,
+	.prepare	= armada_drm_crtc_prepare,
+	.commit		= armada_drm_crtc_commit,
+	.mode_fixup	= armada_drm_crtc_mode_fixup,
+	.mode_set	= armada_drm_crtc_mode_set,
+	.mode_set_base	= armada_drm_crtc_mode_set_base,
+	.load_lut	= armada_drm_crtc_load_lut,
+	.disable	= armada_drm_crtc_disable,
+};
+
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+	unsigned stride, unsigned width, unsigned height)
+{
+	uint32_t addr;
+	unsigned y;
+
+	addr = SRAM_HWC32_RAM1;
+	for (y = 0; y < height; y++) {
+		uint32_t *p = &pix[y * stride];
+		unsigned x;
+
+		for (x = 0; x < width; x++, p++) {
+			uint32_t val = *p;
+
+			val = (val & 0xff00ff00) |
+			      (val & 0x000000ff) << 16 |
+			      (val & 0x00ff0000) >> 16;
+
+			writel_relaxed(val,
+				       base + LCD_SPU_SRAM_WRDAT);
+			writel_relaxed(addr | SRAM_WRITE,
+				       base + LCD_SPU_SRAM_CTRL);
+			addr += 1;
+			if ((addr & 0x00ff) == 0)
+				addr += 0xf00;
+			if ((addr & 0x30ff) == 0)
+				addr = SRAM_HWC32_RAM2;
+		}
+	}
+}
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+	unsigned addr;
+
+	for (addr = 0; addr < 256; addr++) {
+		/* write the default value */
+		writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+		writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+			       base + LCD_SPU_SRAM_CTRL);
+	}
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+	uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+	uint32_t yoff, yscr, h = dcrtc->cursor_h;
+	uint32_t para1;
+
+	/*
+	 * Calculate the visible width and height of the cursor,
+	 * screen position, and the position in the cursor bitmap.
+	 */
+	if (dcrtc->cursor_x < 0) {
+		xoff = -dcrtc->cursor_x;
+		xscr = 0;
+		w -= min(xoff, w);
+	} else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+		xoff = 0;
+		xscr = dcrtc->cursor_x;
+		w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+	} else {
+		xoff = 0;
+		xscr = dcrtc->cursor_x;
+	}
+
+	if (dcrtc->cursor_y < 0) {
+		yoff = -dcrtc->cursor_y;
+		yscr = 0;
+		h -= min(yoff, h);
+	} else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+		yoff = 0;
+		yscr = dcrtc->cursor_y;
+		h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+	} else {
+		yoff = 0;
+		yscr = dcrtc->cursor_y;
+	}
+
+	/* On interlaced modes, the vertical cursor size must be halved */
+	s = dcrtc->cursor_w;
+	if (dcrtc->interlaced) {
+		s *= 2;
+		yscr /= 2;
+		h /= 2;
+	}
+
+	if (!dcrtc->cursor_obj || !h || !w) {
+		spin_lock_irq(&dcrtc->irq_lock);
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+		dcrtc->cursor_update = false;
+		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+		spin_unlock_irq(&dcrtc->irq_lock);
+		return 0;
+	}
+
+	para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+	armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+		       dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+	/*
+	 * Initialize the transparency if the SRAM was powered down.
+	 * We must also reload the cursor data as well.
+	 */
+	if (!(para1 & CFG_CSB_256x32)) {
+		armada_drm_crtc_cursor_tran(dcrtc->base);
+		reload = true;
+	}
+
+	if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+		spin_lock_irq(&dcrtc->irq_lock);
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+		dcrtc->cursor_update = false;
+		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+		spin_unlock_irq(&dcrtc->irq_lock);
+		reload = true;
+	}
+	if (reload) {
+		struct armada_gem_object *obj = dcrtc->cursor_obj;
+		uint32_t *pix;
+		/* Set the top-left corner of the cursor image */
+		pix = obj->addr;
+		pix += yoff * s + xoff;
+		armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+	}
+
+	/* Reload the cursor position, size and enable in the IRQ handler */
+	spin_lock_irq(&dcrtc->irq_lock);
+	dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+	dcrtc->cursor_hw_sz = h << 16 | w;
+	dcrtc->cursor_update = true;
+	armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+	spin_unlock_irq(&dcrtc->irq_lock);
+
+	return 0;
+}
+
+static void cursor_update(void *data)
+{
+	armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+	struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+	struct drm_device *dev = crtc->dev;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_gem_object *obj = NULL;
+	int ret;
+
+	/* If no cursor support, replicate drm's return value */
+	if (!priv->variant->has_spu_adv_reg)
+		return -ENXIO;
+
+	if (handle && w > 0 && h > 0) {
+		/* maximum size is 64x32 or 32x64 */
+		if (w > 64 || h > 64 || (w > 32 && h > 32))
+			return -ENOMEM;
+
+		obj = armada_gem_object_lookup(dev, file, handle);
+		if (!obj)
+			return -ENOENT;
+
+		/* Must be a kernel-mapped object */
+		if (!obj->addr) {
+			drm_gem_object_unreference_unlocked(&obj->obj);
+			return -EINVAL;
+		}
+
+		if (obj->obj.size < w * h * 4) {
+			DRM_ERROR("buffer is too small\n");
+			drm_gem_object_unreference_unlocked(&obj->obj);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (dcrtc->cursor_obj) {
+		dcrtc->cursor_obj->update = NULL;
+		dcrtc->cursor_obj->update_data = NULL;
+		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+	}
+	dcrtc->cursor_obj = obj;
+	dcrtc->cursor_w = w;
+	dcrtc->cursor_h = h;
+	ret = armada_drm_crtc_cursor_update(dcrtc, true);
+	if (obj) {
+		obj->update_data = dcrtc;
+		obj->update = cursor_update;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+	int ret;
+
+	/* If no cursor support, replicate drm's return value */
+	if (!priv->variant->has_spu_adv_reg)
+		return -EFAULT;
+
+	mutex_lock(&dev->struct_mutex);
+	dcrtc->cursor_x = x;
+	dcrtc->cursor_y = y;
+	ret = armada_drm_crtc_cursor_update(dcrtc, false);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+
+	if (dcrtc->cursor_obj)
+		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+	priv->dcrtc[dcrtc->num] = NULL;
+	drm_crtc_cleanup(&dcrtc->crtc);
+
+	if (!IS_ERR(dcrtc->clk))
+		clk_disable_unprepare(dcrtc->clk);
+
+	kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+	struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_frame_work *work;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+	unsigned i;
+	int ret;
+
+	/* We don't support changing the pixel format */
+	if (fb->pixel_format != crtc->fb->pixel_format)
+		return -EINVAL;
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	work->event = event;
+	work->old_fb = dcrtc->crtc.fb;
+
+	i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+				    dcrtc->interlaced);
+	armada_reg_queue_end(work->regs, i);
+
+	/*
+	 * Hold the old framebuffer for the work - DRM appears to drop our
+	 * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+	 */
+	drm_framebuffer_reference(work->old_fb);
+
+	ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+	if (ret) {
+		/*
+		 * Undo our reference above; DRM does not drop the reference
+		 * to this object on error, so that's okay.
+		 */
+		drm_framebuffer_unreference(work->old_fb);
+		kfree(work);
+		return ret;
+	}
+
+	/*
+	 * Don't take a reference on the new framebuffer;
+	 * drm_mode_page_flip_ioctl() has already grabbed a reference and
+	 * will _not_ drop that reference on successful return from this
+	 * function.  Simply mark this new framebuffer as the current one.
+	 */
+	dcrtc->crtc.fb = fb;
+
+	/*
+	 * Finally, if the display is blanked, we won't receive an
+	 * interrupt, so complete it now.
+	 */
+	if (dpms_blanked(dcrtc->dpms)) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (dcrtc->frame_work)
+			armada_drm_crtc_complete_frame_work(dcrtc);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+	struct drm_property *property, uint64_t val)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	bool update_csc = false;
+
+	if (property == priv->csc_yuv_prop) {
+		dcrtc->csc_yuv_mode = val;
+		update_csc = true;
+	} else if (property == priv->csc_rgb_prop) {
+		dcrtc->csc_rgb_mode = val;
+		update_csc = true;
+	}
+
+	if (update_csc) {
+		uint32_t val;
+
+		val = dcrtc->spu_iopad_ctrl |
+		      armada_drm_crtc_calculate_csc(dcrtc);
+		writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+	}
+
+	return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+	.cursor_set	= armada_drm_crtc_cursor_set,
+	.cursor_move	= armada_drm_crtc_cursor_move,
+	.destroy	= armada_drm_crtc_destroy,
+	.set_config	= drm_crtc_helper_set_config,
+	.page_flip	= armada_drm_crtc_page_flip,
+	.set_property	= armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+	{ CSC_AUTO,        "Auto" },
+	{ CSC_YUV_CCIR601, "CCIR601" },
+	{ CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+	{ CSC_AUTO,         "Auto" },
+	{ CSC_RGB_COMPUTER, "Computer system" },
+	{ CSC_RGB_STUDIO,   "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	if (priv->csc_yuv_prop)
+		return 0;
+
+	priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+				"CSC_YUV", armada_drm_csc_yuv_enum_list,
+				ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+	priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+				"CSC_RGB", armada_drm_csc_rgb_enum_list,
+				ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+	if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+	struct resource *res)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc;
+	void __iomem *base;
+	int ret;
+
+	ret = armada_drm_crtc_create_properties(dev);
+	if (ret)
+		return ret;
+
+	base = devm_request_and_ioremap(dev->dev, res);
+	if (!base) {
+		DRM_ERROR("failed to ioremap register\n");
+		return -ENOMEM;
+	}
+
+	dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+	if (!dcrtc) {
+		DRM_ERROR("failed to allocate Armada crtc\n");
+		return -ENOMEM;
+	}
+
+	dcrtc->base = base;
+	dcrtc->num = num;
+	dcrtc->clk = ERR_PTR(-EINVAL);
+	dcrtc->csc_yuv_mode = CSC_AUTO;
+	dcrtc->csc_rgb_mode = CSC_AUTO;
+	dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+	dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+	spin_lock_init(&dcrtc->irq_lock);
+	dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+	INIT_LIST_HEAD(&dcrtc->vbl_list);
+	init_waitqueue_head(&dcrtc->frame_wait);
+
+	/* Initialize some registers which we don't otherwise set */
+	writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+	writel_relaxed(dcrtc->spu_iopad_ctrl,
+		       dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+	if (priv->variant->crtc_init) {
+		ret = priv->variant->crtc_init(dcrtc);
+		if (ret) {
+			kfree(dcrtc);
+			return ret;
+		}
+	}
+
+	/* Ensure AXI pipeline is enabled */
+	armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+	priv->dcrtc[dcrtc->num] = dcrtc;
+
+	drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+	drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+				   dcrtc->csc_yuv_mode);
+	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+				   dcrtc->csc_rgb_mode);
+
+	return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}

+ 83 - 0
drivers/gpu/drm/armada/armada_crtc.h

@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t val;
+};
+
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o)	\
+	do {					\
+		struct armada_regs *__reg = _r;	\
+		__reg[_i].offset = _o;		\
+		__reg[_i].mask = ~(_m);		\
+		__reg[_i].val = _v;		\
+		_i++;				\
+	} while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o)	\
+	armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i)		\
+	armada_reg_queue_mod(_r, _i, 0, 0, ~0)
+
+struct armada_frame_work;
+
+struct armada_crtc {
+	struct drm_crtc		crtc;
+	unsigned		num;
+	void __iomem		*base;
+	struct clk		*clk;
+	struct {
+		uint32_t	spu_v_h_total;
+		uint32_t	spu_v_porch;
+		uint32_t	spu_adv_reg;
+	} v[2];
+	bool			interlaced;
+	bool			cursor_update;
+	uint8_t			csc_yuv_mode;
+	uint8_t			csc_rgb_mode;
+
+	struct drm_plane	*plane;
+
+	struct armada_gem_object	*cursor_obj;
+	int			cursor_x;
+	int			cursor_y;
+	uint32_t		cursor_hw_pos;
+	uint32_t		cursor_hw_sz;
+	uint32_t		cursor_w;
+	uint32_t		cursor_h;
+
+	int			dpms;
+	uint32_t		cfg_dumb_ctrl;
+	uint32_t		dumb_ctrl;
+	uint32_t		spu_iopad_ctrl;
+
+	wait_queue_head_t	frame_wait;
+	struct armada_frame_work *frame_work;
+
+	spinlock_t		irq_lock;
+	uint32_t		irq_ena;
+	struct list_head	vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif

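The armada_reg_queue_* macros above accumulate register updates into an
armada_regs array that armada_drm_crtc_update_regs() applies in a single
pass; armada_reg_queue_end() stores the ~0 offset the update loop uses as
its stop sentinel.  A minimal usage sketch, mirroring what
armada_drm_crtc_mode_set_base() does in the CRTC code above:

	struct armada_regs regs[4];
	unsigned i = 0;

	/* unmasked write, masked read-modify-write, then the terminator */
	armada_reg_queue_set(regs, i, addr, LCD_CFG_GRA_START_ADDR0);
	armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
	armada_reg_queue_end(regs, i);

	armada_drm_crtc_update_regs(dcrtc, regs);
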
+ 177 - 0
drivers/gpu/drm/armada/armada_debugfs.c

@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct armada_private *priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mm_dump_table(m, &priv->linear);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct armada_private *priv = dev->dev_private;
+	int n, i;
+
+	if (priv) {
+		for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+			struct armada_crtc *dcrtc = priv->dcrtc[n];
+			if (!dcrtc)
+				continue;
+
+			for (i = 0x84; i <= 0x1c4; i += 4) {
+				uint32_t v = readl_relaxed(dcrtc->base + i);
+				seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+	.owner = THIS_MODULE,
+	.open = armada_debugfs_reg_r_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int armada_debugfs_write(struct file *file, const char __user *ptr,
+	size_t len, loff_t *off)
+{
+	struct drm_device *dev = file->private_data;
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+	char buf[32], *p;
+	uint32_t reg, val;
+	int ret;
+
+	if (*off != 0)
+		return 0;
+
+	if (len > sizeof(buf) - 1)
+		len = sizeof(buf) - 1;
+
+	ret = strncpy_from_user(buf, ptr, len);
+	if (ret < 0)
+		return ret;
+	buf[len] = '\0';
+
+	reg = simple_strtoul(buf, &p, 16);
+	if (!isspace(*p))
+		return -EINVAL;
+	val = simple_strtoul(p + 1, NULL, 16);
+
+	if (reg >= 0x84 && reg <= 0x1c4)
+		writel(val, dcrtc->base + reg);
+
+	return len;
+}
+
+static const struct file_operations fops_reg_w = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = armada_debugfs_write,
+	.llseek = noop_llseek,
+};
+
+static struct drm_info_list armada_debugfs_list[] = {
+	{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+	const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+	const char *name, umode_t mode, const struct file_operations *fops)
+{
+	struct dentry *de;
+
+	de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+	return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+	int ret;
+
+	ret = drm_debugfs_create_files(armada_debugfs_list,
+				       ARMADA_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
+	ret = armada_debugfs_create(minor->debugfs_root, minor,
+				   "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+	if (ret)
+		goto err_1;
+
+	ret = armada_debugfs_create(minor->debugfs_root, minor,
+				"reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+	if (ret)
+		goto err_2;
+	return ret;
+
+ err_2:
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+				 minor);
+	return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+				 minor);
+}

+ 113 - 0
drivers/gpu/drm/armada/armada_drm.h

@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+	uint32_t ov, v;
+
+	ov = v = readl_relaxed(ptr);
+	v = (v & ~mask) | val;
+	if (ov != v)
+		writel_relaxed(v, ptr);
+}
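+
+/*
+ * Illustrative use (not from the driver): update the CSC mode bits in
+ * LCD_SPU_IOPAD_CONTROL without disturbing the pad configuration:
+ *	armada_updatel(CFG_CSC_YUV_CCIR709, CFG_CSC_MASK,
+ *		       dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+ */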
+
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
+	uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+	/* 88AP510 spec recommends pitch be a multiple of 128 */
+	return ALIGN(pitch, 128);
+}
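+
+/*
+ * Worked example: a 1366-wide XRGB8888 (32bpp) surface needs
+ * 1366 * 4 = 5464 bytes per line, which rounds up to 5504; a
+ * 1024-wide surface is already aligned at 4096 bytes.
+ */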
+
+struct armada_vbl_event {
+	struct list_head	node;
+	void			*data;
+	void			(*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+	struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+	struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+	struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do {	\
+	struct armada_vbl_event *__e = _e;		\
+	INIT_LIST_HEAD(&__e->node);			\
+	__e->data = _d;					\
+	__e->fn = _f;					\
+} while (0)
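+
+/*
+ * Typical use (illustrative; handler_fn and work are hypothetical
+ * names): initialise an event once, then arm it whenever work must
+ * run from the next vblank interrupt:
+ *	armada_drm_vbl_event_init(&work->event, handler_fn, work);
+ *	armada_drm_vbl_event_add(dcrtc, &work->event);
+ */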
+
+
+struct armada_private;
+
+struct armada_variant {
+	bool	has_spu_adv_reg;
+	uint32_t spu_adv_reg;
+	int (*init)(struct armada_private *, struct device *);
+	int (*crtc_init)(struct armada_crtc *);
+	int (*crtc_compute_clock)(struct armada_crtc *,
+				  const struct drm_display_mode *,
+				  uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+	const struct armada_variant *variant;
+	struct work_struct	fb_unref_work;
+	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+	struct drm_fb_helper	*fbdev;
+	struct armada_crtc	*dcrtc[2];
+	struct drm_mm		linear;
+	struct clk		*extclk[2];
+	struct drm_property	*csc_yuv_prop;
+	struct drm_property	*csc_rgb_prop;
+	struct drm_property	*colorkey_prop;
+	struct drm_property	*colorkey_min_prop;
+	struct drm_property	*colorkey_max_prop;
+	struct drm_property	*colorkey_val_prop;
+	struct drm_property	*colorkey_alpha_prop;
+	struct drm_property	*colorkey_mode_prop;
+	struct drm_property	*brightness_prop;
+	struct drm_property	*contrast_prop;
+	struct drm_property	*saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+	struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+	struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif

+ 421 - 0
drivers/gpu/drm/armada/armada_drv.c

@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+	/* With 0x24, there is no translation between vp_out and int_vp
+	FB	LCD out	Pins	VIP	Int Vp
+	R:23:16	R:7:0	VPC7:0	7:0	7:0[R]
+	G:15:8	G:15:8	VPB7:0	23:16	23:16[G]
+	B:7:0	B:23:16	VPA7:0	15:8	15:8[B]
+	*/
+	.swap_a = 2,
+	.swap_b = 3,
+	.swap_c = 4,
+	.swap_d = 5,
+	.swap_e = 0,
+	.swap_f = 1,
+	.audio_cfg = BIT(2),
+	.audio_frame[1] = 1,
+	.audio_format = AFMT_SPDIF,
+	.audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+	.i2c_adapter_id = 0,
+	.crtcs = 1 << 0, /* Only LCD0 at the moment */
+	.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+	.interlace_allowed = true,
+	.info = {
+		.type = "tda998x",
+		.addr = 0x70,
+		.platform_data = &params,
+	},
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+	struct armada_private *priv =
+		container_of(work, struct armada_private, fb_unref_work);
+	struct drm_framebuffer *fb;
+
+	while (kfifo_get(&priv->fb_unref, &fb))
+		drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+	struct drm_framebuffer *fb)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	/*
+	 * Yes, we really must jump through these hoops just to store a
+	 * _pointer_ to something into the kfifo.  This is utterly insane
+	 * and idiotic, because kfifo requires the _data_ pointed to by
+	 * the pointer to be const, not the pointer itself.  Not only that, but
+	 * you have to pass a pointer _to_ the pointer you want stored.
+	 */
+	const struct drm_framebuffer *silly_api_alert = fb;
+	WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+	schedule_work(&priv->fb_unref_work);
+}
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+	struct drm_framebuffer *fb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	__armada_drm_queue_unref_work(dev, fb);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	const struct platform_device_id *id;
+	struct armada_private *priv;
+	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+	struct resource *mem = NULL;
+	int ret, n, i;
+
+	memset(res, 0, sizeof(res));
+
+	for (n = i = 0; ; n++) {
+		struct resource *r = platform_get_resource(dev->platformdev,
+							   IORESOURCE_MEM, n);
+		if (!r)
+			break;
+
+		/* Resources above 64K are graphics memory */
+		if (resource_size(r) > SZ_64K)
+			mem = r;
+		else if (i < ARRAY_SIZE(priv->dcrtc))
+			res[i++] = r;
+		else
+			return -EINVAL;
+	}
+
+	if (!res[0] || !mem)
+		return -ENXIO;
+
+	if (!devm_request_mem_region(dev->dev, mem->start,
+			resource_size(mem), "armada-drm"))
+		return -EBUSY;
+
+	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("failed to allocate private\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_private = priv;
+
+	/* Get the implementation specific driver data. */
+	id = platform_get_device_id(dev->platformdev);
+	if (!id)
+		return -ENXIO;
+
+	priv->variant = (struct armada_variant *)id->driver_data;
+
+	ret = priv->variant->init(priv, dev->dev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+	INIT_KFIFO(priv->fb_unref);
+
+	/* Mode setting support */
+	drm_mode_config_init(dev);
+	dev->mode_config.min_width = 320;
+	dev->mode_config.min_height = 200;
+
+	/*
+	 * With vscale enabled, the maximum width is 1920 due to the
+	 * 1920-pixel by 3-line vscale RAM.
+	 */
+	dev->mode_config.max_width = 1920;
+	dev->mode_config.max_height = 2048;
+
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+	/* Create all LCD controllers */
+	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+		if (!res[n])
+			break;
+
+		ret = armada_drm_crtc_create(dev, n, res[n]);
+		if (ret)
+			goto err_kms;
+	}
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+	ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+	if (ret)
+		goto err_kms;
+#endif
+
+	ret = drm_vblank_init(dev, n);
+	if (ret)
+		goto err_kms;
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto err_kms;
+
+	dev->vblank_disable_allowed = 1;
+
+	ret = armada_fbdev_init(dev);
+	if (ret)
+		goto err_irq;
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+
+ err_irq:
+	drm_irq_uninstall(dev);
+ err_kms:
+	drm_mode_config_cleanup(dev);
+	drm_mm_takedown(&priv->linear);
+	flush_work(&priv->fb_unref_work);
+
+	return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+	armada_fbdev_fini(dev);
+	drm_irq_uninstall(dev);
+	drm_mode_config_cleanup(dev);
+	drm_mm_takedown(&priv->linear);
+	flush_work(&priv->fb_unref_work);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+	if (list_empty(&evt->node)) {
+		list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+		drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+	}
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	if (!list_empty(&evt->node)) {
+		list_del_init(&evt->node);
+		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+	}
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+	armada_drm_vbl_event_remove(dcrtc, evt);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct armada_private *priv = dev->dev_private;
+	armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+	return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct armada_private *priv = dev->dev_private;
+	armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+	uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+	irqreturn_t handled = IRQ_NONE;
+
+	/*
+	 * This is ridiculous - rather than writing bits to clear, we
+	 * have to set the actual status register value.  This is racy.
+	 */
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/* Mask out those interrupts we haven't enabled */
+	v = stat & dcrtc->irq_ena;
+
+	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+		armada_drm_crtc_irq(dcrtc, stat);
+		handled = IRQ_HANDLED;
+	}
+
+	return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+	spin_lock_irq(&dev->vbl_lock);
+	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+	spin_unlock_irq(&dev->vbl_lock);
+
+	return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+	writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+		DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+		DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+		DRM_UNLOCKED),
+};
+
+static const struct file_operations armada_drm_fops = {
+	.owner			= THIS_MODULE,
+	.llseek			= no_llseek,
+	.read			= drm_read,
+	.poll			= drm_poll,
+	.unlocked_ioctl		= drm_ioctl,
+	.mmap			= drm_gem_mmap,
+	.open			= drm_open,
+	.release		= drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+	.load			= armada_drm_load,
+	.open			= NULL,
+	.preclose		= NULL,
+	.postclose		= NULL,
+	.lastclose		= NULL,
+	.unload			= armada_drm_unload,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= armada_drm_enable_vblank,
+	.disable_vblank		= armada_drm_disable_vblank,
+	.irq_handler		= armada_drm_irq_handler,
+	.irq_postinstall	= armada_drm_irq_postinstall,
+	.irq_uninstall		= armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init		= armada_drm_debugfs_init,
+	.debugfs_cleanup	= armada_drm_debugfs_cleanup,
+#endif
+	.gem_free_object	= armada_gem_free_object,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_export	= armada_gem_prime_export,
+	.gem_prime_import	= armada_gem_prime_import,
+	.dumb_create		= armada_gem_dumb_create,
+	.dumb_map_offset	= armada_gem_dumb_map_offset,
+	.dumb_destroy		= armada_gem_dumb_destroy,
+	.gem_vm_ops		= &armada_gem_vm_ops,
+	.major			= 1,
+	.minor			= 0,
+	.name			= "armada-drm",
+	.desc			= "Armada SoC DRM",
+	.date			= "20120730",
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
+				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
+	.ioctls			= armada_ioctls,
+	.fops			= &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&armada_drm_driver, pdev);
+	return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+	{
+		.name		= "armada-drm",
+		.driver_data	= (unsigned long)&armada510_ops,
+	}, {
+		.name		= "armada-510-drm",
+		.driver_data	= (unsigned long)&armada510_ops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+	.probe	= armada_drm_probe,
+	.remove	= armada_drm_remove,
+	.driver	= {
+		.name	= "armada-drm",
+		.owner	= THIS_MODULE,
+	},
+	.id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+	armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+	return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+	platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");

+ 170 - 0
drivers/gpu/drm/armada/armada_fb.c

@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+	drm_framebuffer_cleanup(&dfb->fb);
+	drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+	kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *dfile, unsigned int *handle)
+{
+	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+	return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+	.destroy	= armada_fb_destroy,
+	.create_handle	= armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+	struct armada_framebuffer *dfb;
+	uint8_t format, config;
+	int ret;
+
+	switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod)		\
+	case DRM_FORMAT_##drm:		\
+		format = CFG_##fmt;	\
+		config = mod;		\
+		break
+	FMT(RGB565,	565,		CFG_SWAPRB);
+	FMT(BGR565,	565,		0);
+	FMT(ARGB1555,	1555,		CFG_SWAPRB);
+	FMT(ABGR1555,	1555,		0);
+	FMT(RGB888,	888PACK,	CFG_SWAPRB);
+	FMT(BGR888,	888PACK,	0);
+	FMT(XRGB8888,	X888,		CFG_SWAPRB);
+	FMT(XBGR8888,	X888,		0);
+	FMT(ARGB8888,	8888,		CFG_SWAPRB);
+	FMT(ABGR8888,	8888,		0);
+	FMT(YUYV,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+	FMT(UYVY,	422PACK,	CFG_YUV2RGB);
+	FMT(VYUY,	422PACK,	CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(YVYU,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU);
+	FMT(YUV422,	422,		CFG_YUV2RGB);
+	FMT(YVU422,	422,		CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(YUV420,	420,		CFG_YUV2RGB);
+	FMT(YVU420,	420,		CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(C8,		PSEUDO8,	0);
+#undef FMT
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+	if (!dfb) {
+		DRM_ERROR("failed to allocate Armada fb object\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dfb->fmt = format;
+	dfb->mod = config;
+	dfb->obj = obj;
+
+	drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+	ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+	if (ret) {
+		kfree(dfb);
+		return ERR_PTR(ret);
+	}
+
+	/*
+	 * Take a reference on our object as we're successful - the
+	 * caller already holds a reference, which keeps us safe for
+	 * the above call, but the caller will drop their reference
+	 * to it.  Hence we need to take our own reference.
+	 */
+	drm_gem_object_reference(&obj->obj);
+
+	return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+	struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+	struct armada_gem_object *obj;
+	struct armada_framebuffer *dfb;
+	int ret;
+
+	DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+		mode->width, mode->height, mode->pixel_format,
+		mode->flags, mode->pitches[0], mode->pitches[1],
+		mode->pitches[2]);
+
+	/* We can only handle a single plane at the moment */
+	if (drm_format_num_planes(mode->pixel_format) > 1 &&
+	    (mode->handles[0] != mode->handles[1] ||
+	     mode->handles[0] != mode->handles[2])) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+	if (!obj) {
+		ret = -ENOENT;
+		goto err;
+	}
+
+	if (obj->obj.import_attach && !obj->sgt) {
+		ret = armada_gem_map_import(obj);
+		if (ret)
+			goto err_unref;
+	}
+
+	/* Framebuffer objects must have a valid device address for scanout */
+	if (obj->dev_addr == DMA_ERROR_CODE) {
+		ret = -EINVAL;
+		goto err_unref;
+	}
+
+	dfb = armada_framebuffer_create(dev, mode, obj);
+	if (IS_ERR(dfb)) {
+		ret = PTR_ERR(dfb);
+		goto err;
+	}
+
+	drm_gem_object_unreference_unlocked(&obj->obj);
+
+	return &dfb->fb;
+
+ err_unref:
+	drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+	DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+	return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh = priv->fbdev;
+
+	if (fbh)
+		drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+	.fb_create		= armada_fb_create,
+	.output_poll_changed	= armada_output_poll_changed,
+};

+ 24 - 0
drivers/gpu/drm/armada/armada_fb.h

@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+	struct drm_framebuffer	fb;
+	struct armada_gem_object *obj;
+	uint8_t			fmt;
+	uint8_t			mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+	container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+	struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif

+ 202 - 0
drivers/gpu/drm/armada/armada_fbdev.c

@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+	.fb_debug_enter	= drm_fb_helper_debug_enter,
+	.fb_debug_leave	= drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = fbh->dev;
+	struct drm_mode_fb_cmd2 mode;
+	struct armada_framebuffer *dfb;
+	struct armada_gem_object *obj;
+	struct fb_info *info;
+	int size, ret;
+	void *ptr;
+
+	memset(&mode, 0, sizeof(mode));
+	mode.width = sizes->surface_width;
+	mode.height = sizes->surface_height;
+	mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+	mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+					sizes->surface_depth);
+
+	size = mode.pitches[0] * mode.height;
+	obj = armada_gem_alloc_private_object(dev, size);
+	if (!obj) {
+		DRM_ERROR("failed to allocate fb memory\n");
+		return -ENOMEM;
+	}
+
+	ret = armada_gem_linear_back(dev, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&obj->obj);
+		return ret;
+	}
+
+	ptr = armada_gem_map_object(dev, obj);
+	if (!ptr) {
+		drm_gem_object_unreference_unlocked(&obj->obj);
+		return -ENOMEM;
+	}
+
+	dfb = armada_framebuffer_create(dev, &mode, obj);
+
+	/*
+	 * A reference is now held by the framebuffer object if
+	 * successful, otherwise this drops the ref for the error path.
+	 */
+	drm_gem_object_unreference_unlocked(&obj->obj);
+
+	if (IS_ERR(dfb))
+		return PTR_ERR(dfb);
+
+	info = framebuffer_alloc(0, dev->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_fballoc;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto err_fbcmap;
+	}
+
+	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+	info->par = fbh;
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &armada_fb_ops;
+	info->fix.smem_start = obj->phys_addr;
+	info->fix.smem_len = obj->obj.size;
+	info->screen_size = obj->obj.size;
+	info->screen_base = ptr;
+	fbh->fb = &dfb->fb;
+	fbh->fbdev = info;
+	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
+		dfb->fb.width, dfb->fb.height,
+		dfb->fb.bits_per_pixel, obj->phys_addr);
+
+	return 0;
+
+ err_fbcmap:
+	framebuffer_release(info);
+ err_fballoc:
+	dfb->fb.funcs->destroy(&dfb->fb);
+	return ret;
+}
+
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	if (!fbh->fb) {
+		ret = armada_fb_create(fbh, sizes);
+		if (ret == 0)
+			ret = 1;
+	}
+	return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+	.gamma_set	= armada_drm_crtc_gamma_set,
+	.gamma_get	= armada_drm_crtc_gamma_get,
+	.fb_probe	= armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh;
+	int ret;
+
+	fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+	if (!fbh)
+		return -ENOMEM;
+
+	priv->fbdev = fbh;
+
+	fbh->funcs = &armada_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, fbh, 1, 1);
+	if (ret) {
+		DRM_ERROR("failed to initialize drm fb helper\n");
+		goto err_fb_helper;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(fbh);
+	if (ret) {
+		DRM_ERROR("failed to add fb connectors\n");
+		goto err_fb_setup;
+	}
+
+	ret = drm_fb_helper_initial_config(fbh, 32);
+	if (ret) {
+		DRM_ERROR("failed to set initial config\n");
+		goto err_fb_setup;
+	}
+
+	return 0;
+ err_fb_setup:
+	drm_fb_helper_fini(fbh);
+ err_fb_helper:
+	priv->fbdev = NULL;
+	return ret;
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh = priv->fbdev;
+
+	if (fbh) {
+		struct fb_info *info = fbh->fbdev;
+
+		if (info) {
+			unregister_framebuffer(info);
+			if (info->cmap.len)
+				fb_dealloc_cmap(&info->cmap);
+			framebuffer_release(info);
+		}
+
+		if (fbh->fb)
+			fbh->fb->funcs->destroy(fbh->fb);
+
+		drm_fb_helper_fini(fbh);
+
+		priv->fbdev = NULL;
+	}
+}

+ 611 - 0
drivers/gpu/drm/armada/armada_gem.c

@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+	int ret;
+
+	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vma, addr, pfn);
+
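+	/*
+	 * -EBUSY from vm_insert_pfn() means another thread raced us and
+	 * the PTE is already present, which is as good as success here.
+	 */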
+	switch (ret) {
+	case 0:
+	case -EBUSY:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+	.fault	= armada_gem_vm_fault,
+	.open	= drm_gem_vm_open,
+	.close	= drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+	return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+	drm_gem_free_mmap_offset(&dobj->obj);
+
+	if (dobj->page) {
+		/* page backed memory */
+		unsigned int order = get_order(dobj->obj.size);
+		__free_pages(dobj->page, order);
+	} else if (dobj->linear) {
+		/* linear backed memory */
+		drm_mm_remove_node(dobj->linear);
+		kfree(dobj->linear);
+		if (dobj->addr)
+			iounmap(dobj->addr);
+	}
+
+	if (dobj->obj.import_attach) {
+		/* We only ever display imported data */
+		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+					 DMA_TO_DEVICE);
+		drm_prime_gem_destroy(&dobj->obj, NULL);
+	}
+
+	drm_gem_object_release(&dobj->obj);
+
+	kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+	struct armada_private *priv = dev->dev_private;
+	size_t size = obj->obj.size;
+
+	if (obj->page || obj->linear)
+		return 0;
+
+	/*
+	 * If it is a small allocation (typically cursor, which will
+	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+	 * Framebuffers will never be this small (our minimum size for
+	 * framebuffers is larger than this anyway.)  Such objects are
+	 * only accessed by the CPU so we don't need any special handling
+	 * here.
+	 */
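+	/* A 64x32 ARGB cursor is 64 * 32 * 4 = 8192 bytes, hence the cap. */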
+	if (size <= 8192) {
+		unsigned int order = get_order(size);
+		struct page *p = alloc_pages(GFP_KERNEL, order);
+
+		if (p) {
+			obj->addr = page_address(p);
+			obj->phys_addr = page_to_phys(p);
+			obj->page = p;
+
+			memset(obj->addr, 0, PAGE_ALIGN(size));
+		}
+	}
+
+	/*
+	 * We could grab something from CMA if it's enabled, but that
+	 * involves building in a problem:
+	 *
+	 * CMA's interface uses dma_alloc_coherent(), which provides us
+	 * with a CPU virtual address and a device address.
+	 *
+	 * The CPU virtual address may be either an address in the kernel
+	 * direct mapped region (for example, as it would be on x86) or
+	 * it may be remapped into another part of kernel memory space
+	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
+	 * returned virtual address may be invalid, depending on the
+	 * architecture implementation.
+	 *
+	 * The device address may also not be a physical address; it may
+	 * be that there is some kind of remapping between the device and
+	 * system RAM, which makes the use of the device address also
+	 * unsafe to re-use as a physical address.
+	 *
+	 * This makes DRM usage of dma_alloc_coherent() in a generic way
+	 * at best very questionable and unsafe.
+	 */
+
+	/* Otherwise, grab it from our linear allocation */
+	if (!obj->page) {
+		struct drm_mm_node *node;
+		unsigned align = min_t(unsigned, size, SZ_2M);
+		void __iomem *ptr;
+		int ret;
+
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			return -ENOMEM;
+
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_mm_insert_node(&priv->linear, node, size, align,
+					 DRM_MM_SEARCH_DEFAULT);
+		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			kfree(node);
+			return ret;
+		}
+
+		obj->linear = node;
+
+		/* Ensure that the memory we're returning is cleared. */
+		ptr = ioremap_wc(obj->linear->start, size);
+		if (!ptr) {
+			mutex_lock(&dev->struct_mutex);
+			drm_mm_remove_node(obj->linear);
+			mutex_unlock(&dev->struct_mutex);
+			kfree(obj->linear);
+			obj->linear = NULL;
+			return -ENOMEM;
+		}
+
+		memset_io(ptr, 0, size);
+		iounmap(ptr);
+
+		obj->phys_addr = obj->linear->start;
+		obj->dev_addr = obj->linear->start;
+	}
+
+	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
+			 obj, obj->phys_addr, obj->dev_addr);
+
+	return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+	/* only linear objects need to be ioremap'd */
+	if (!dobj->addr && dobj->linear)
+		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+	return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+	struct armada_gem_object *obj;
+
+	size = roundup_gem_size(size);
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	drm_gem_private_object_init(dev, &obj->obj, size);
+	obj->dev_addr = DMA_ERROR_CODE;
+
+	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+	return obj;
+}
+
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+	size_t size)
+{
+	struct armada_gem_object *obj;
+	struct address_space *mapping;
+
+	size = roundup_gem_size(size);
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->obj, size)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	obj->dev_addr = DMA_ERROR_CODE;
+
+	mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+	return obj;
+}
+
+/* Dumb alloc support */
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+	struct drm_mode_create_dumb *args)
+{
+	struct armada_gem_object *dobj;
+	u32 handle;
+	size_t size;
+	int ret;
+
+	args->pitch = armada_pitch(args->width, args->bpp);
+	args->size = size = args->pitch * args->height;
+
+	dobj = armada_gem_alloc_private_object(dev, size);
+	if (dobj == NULL)
+		return -ENOMEM;
+
+	ret = armada_gem_linear_back(dev, dobj);
+	if (ret)
+		goto err;
+
+	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+	if (ret)
+		goto err;
+
+	args->handle = handle;
+
+	/* drop reference from allocate - handle holds it now */
+	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+	uint32_t handle, uint64_t *offset)
+{
+	struct armada_gem_object *obj;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = armada_gem_object_lookup(dev, file, handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object\n");
+		ret = -ENOENT;
+		goto err_unlock;
+	}
+
+	/* Don't allow imported objects to be mapped */
+	if (obj->obj.import_attach) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	ret = drm_gem_create_mmap_offset(&obj->obj);
+	if (ret == 0) {
+		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+	}
+
+	drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+	uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_create *args = data;
+	struct armada_gem_object *dobj;
+	size_t size;
+	u32 handle;
+	int ret;
+
+	if (args->size == 0)
+		return -ENOMEM;
+
+	size = args->size;
+
+	dobj = armada_gem_alloc_object(dev, size);
+	if (dobj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+	if (ret)
+		goto err;
+
+	args->handle = handle;
+
+	/* drop reference from allocate - handle holds it now */
+	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_mmap *args = data;
+	struct armada_gem_object *dobj;
+	unsigned long addr;
+
+	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	if (dobj == NULL)
+		return -ENOENT;
+
+	if (!dobj->obj.filp) {
+		drm_gem_object_unreference(&dobj->obj);
+		return -EINVAL;
+	}
+
+	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+		       MAP_SHARED, args->offset);
+	drm_gem_object_unreference(&dobj->obj);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	args->addr = addr;
+
+	return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_pwrite *args = data;
+	struct armada_gem_object *dobj;
+	char __user *ptr;
+	int ret;
+
+	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+		args->handle, args->offset, args->size, args->ptr);
+
+	if (args->size == 0)
+		return 0;
+
+	ptr = (char __user *)(uintptr_t)args->ptr;
+
+	if (!access_ok(VERIFY_READ, ptr, args->size))
+		return -EFAULT;
+
+	ret = fault_in_multipages_readable(ptr, args->size);
+	if (ret)
+		return ret;
+
+	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	if (dobj == NULL)
+		return -ENOENT;
+
+	/* Must be a kernel-mapped object */
+	if (!dobj->addr) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
+	if (args->offset > dobj->obj.size ||
+	    args->size > dobj->obj.size - args->offset) {
+		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
+		ret = -EINVAL;
+		goto unref;
+	}
+
+	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+		ret = -EFAULT;
+	} else if (dobj->update) {
+		dobj->update(dobj->update_data);
+		ret = 0;
+	}
+
+ unref:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+/* Prime support */
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+	enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	struct scatterlist *sg;
+	struct sg_table *sgt;
+	int i, num;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	if (dobj->obj.filp) {
+		struct address_space *mapping;
+		gfp_t gfp;
+		int count;
+
+		count = dobj->obj.size / PAGE_SIZE;
+		if (sg_alloc_table(sgt, count, GFP_KERNEL))
+			goto free_sgt;
+
+		mapping = file_inode(dobj->obj.filp)->i_mapping;
+		gfp = mapping_gfp_mask(mapping);
+
+		for_each_sg(sgt->sgl, sg, count, i) {
+			struct page *page;
+
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page)) {
+				num = i;
+				goto release;
+			}
+
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		}
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+			num = sgt->nents;
+			goto release;
+		}
+	} else if (dobj->page) {
+		/* Single contiguous page */
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free_sgt;
+
+		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+			goto free_table;
+	} else if (dobj->linear) {
+		/* Single contiguous physical region - no struct page */
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free_sgt;
+		sg_dma_address(sgt->sgl) = dobj->dev_addr;
+		sg_dma_len(sgt->sgl) = dobj->obj.size;
+	} else {
+		goto free_sgt;
+	}
+	return sgt;
+
+ release:
+	for_each_sg(sgt->sgl, sg, num, i)
+		page_cache_release(sg_page(sg));
+ free_table:
+	sg_free_table(sgt);
+ free_sgt:
+	kfree(sgt);
+	return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	int i;
+
+	if (!dobj->linear)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+	if (dobj->obj.filp) {
+		struct scatterlist *sg;
+		for_each_sg(sgt->sgl, sg, sgt->nents, i)
+			page_cache_release(sg_page(sg));
+	}
+
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+	return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+	.map_dma_buf	= armada_gem_prime_map_dma_buf,
+	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
+	.release	= drm_gem_dmabuf_release,
+	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
+	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
+	.kmap		= armada_gem_dmabuf_no_kmap,
+	.kunmap		= armada_gem_dmabuf_no_kunmap,
+	.mmap		= armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+	int flags)
+{
+	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+			      O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+	struct dma_buf_attachment *attach;
+	struct armada_gem_object *dobj;
+
+	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+		struct drm_gem_object *obj = buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing our own dmabuf(s) increases the
+			 * refcount on the gem object itself.
+			 */
+			drm_gem_object_reference(obj);
+			dma_buf_put(buf);
+			return obj;
+		}
+	}
+
+	attach = dma_buf_attach(buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	dobj = armada_gem_alloc_private_object(dev, buf->size);
+	if (!dobj) {
+		dma_buf_detach(buf, attach);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dobj->obj.import_attach = attach;
+
+	/*
+	 * Don't call dma_buf_map_attachment() here - it maps the
+	 * scatterlist immediately for DMA, and this is not always
+	 * an appropriate thing to do.
+	 */
+	return &dobj->obj;
+}
+
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+	int ret;
+
+	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+					  DMA_TO_DEVICE);
+	if (!dobj->sgt) {
+		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+		return -EINVAL;
+	}
+	if (IS_ERR(dobj->sgt)) {
+		ret = PTR_ERR(dobj->sgt);
+		dobj->sgt = NULL;
+		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+		return ret;
+	}
+	if (dobj->sgt->nents > 1) {
+		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
+		return -EINVAL;
+	}
+	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
+		return -EINVAL;
+	}
+	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+	return 0;
+}

+ 52 - 0
drivers/gpu/drm/armada/armada_gem.h

@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
+struct armada_gem_object {
+	struct drm_gem_object	obj;
+	void			*addr;
+	phys_addr_t		phys_addr;
+	resource_size_t		dev_addr;
+	struct drm_mm_node	*linear;	/* for linear backed */
+	struct page		*page;		/* for page backed */
+	struct sg_table		*sgt;		/* for imported */
+	void			(*update)(void *);
+	void			*update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+	size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+	struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+	uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+	uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+	struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+	struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
+static inline struct armada_gem_object *armada_gem_object_lookup(
+	struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+	struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+	return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif

+ 318 - 0
drivers/gpu/drm/armada/armada_hw.h

@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *  LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ *  LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ *  LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+	LCD_SPU_ADV_REG			= 0x0084,	/* Armada 510 */
+	LCD_SPU_DMA_START_ADDR_Y0	= 0x00c0,
+	LCD_SPU_DMA_START_ADDR_U0	= 0x00c4,
+	LCD_SPU_DMA_START_ADDR_V0	= 0x00c8,
+	LCD_CFG_DMA_START_ADDR_0	= 0x00cc,
+	LCD_SPU_DMA_START_ADDR_Y1	= 0x00d0,
+	LCD_SPU_DMA_START_ADDR_U1	= 0x00d4,
+	LCD_SPU_DMA_START_ADDR_V1	= 0x00d8,
+	LCD_CFG_DMA_START_ADDR_1	= 0x00dc,
+	LCD_SPU_DMA_PITCH_YC		= 0x00e0,
+	LCD_SPU_DMA_PITCH_UV		= 0x00e4,
+	LCD_SPU_DMA_OVSA_HPXL_VLN	= 0x00e8,
+	LCD_SPU_DMA_HPXL_VLN		= 0x00ec,
+	LCD_SPU_DZM_HPXL_VLN		= 0x00f0,
+	LCD_CFG_GRA_START_ADDR0		= 0x00f4,
+	LCD_CFG_GRA_START_ADDR1		= 0x00f8,
+	LCD_CFG_GRA_PITCH		= 0x00fc,
+	LCD_SPU_GRA_OVSA_HPXL_VLN	= 0x0100,
+	LCD_SPU_GRA_HPXL_VLN		= 0x0104,
+	LCD_SPU_GZM_HPXL_VLN		= 0x0108,
+	LCD_SPU_HWC_OVSA_HPXL_VLN	= 0x010c,
+	LCD_SPU_HWC_HPXL_VLN		= 0x0110,
+	LCD_SPUT_V_H_TOTAL		= 0x0114,
+	LCD_SPU_V_H_ACTIVE		= 0x0118,
+	LCD_SPU_H_PORCH			= 0x011c,
+	LCD_SPU_V_PORCH			= 0x0120,
+	LCD_SPU_BLANKCOLOR		= 0x0124,
+	LCD_SPU_ALPHA_COLOR1		= 0x0128,
+	LCD_SPU_ALPHA_COLOR2		= 0x012c,
+	LCD_SPU_COLORKEY_Y		= 0x0130,
+	LCD_SPU_COLORKEY_U		= 0x0134,
+	LCD_SPU_COLORKEY_V		= 0x0138,
+	LCD_CFG_RDREG4F			= 0x013c,	/* Armada 510 */
+	LCD_SPU_SPI_RXDATA		= 0x0140,
+	LCD_SPU_ISA_RXDATA		= 0x0144,
+	LCD_SPU_HWC_RDDAT		= 0x0158,
+	LCD_SPU_GAMMA_RDDAT		= 0x015c,
+	LCD_SPU_PALETTE_RDDAT		= 0x0160,
+	LCD_SPU_IOPAD_IN		= 0x0178,
+	LCD_CFG_RDREG5F			= 0x017c,
+	LCD_SPU_SPI_CTRL		= 0x0180,
+	LCD_SPU_SPI_TXDATA		= 0x0184,
+	LCD_SPU_SMPN_CTRL		= 0x0188,
+	LCD_SPU_DMA_CTRL0		= 0x0190,
+	LCD_SPU_DMA_CTRL1		= 0x0194,
+	LCD_SPU_SRAM_CTRL		= 0x0198,
+	LCD_SPU_SRAM_WRDAT		= 0x019c,
+	LCD_SPU_SRAM_PARA0		= 0x01a0,	/* Armada 510 */
+	LCD_SPU_SRAM_PARA1		= 0x01a4,
+	LCD_CFG_SCLK_DIV		= 0x01a8,
+	LCD_SPU_CONTRAST		= 0x01ac,
+	LCD_SPU_SATURATION		= 0x01b0,
+	LCD_SPU_CBSH_HUE		= 0x01b4,
+	LCD_SPU_DUMB_CTRL		= 0x01b8,
+	LCD_SPU_IOPAD_CONTROL		= 0x01bc,
+	LCD_SPU_IRQ_ENA			= 0x01c0,
+	LCD_SPU_IRQ_ISR			= 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+	ADV_VSYNC_L_OFF	= 0xfff << 20,
+	ADV_GRACOLORKEY	= 1 << 19,
+	ADV_VIDCOLORKEY	= 1 << 18,
+	ADV_HWC32BLEND	= 1 << 15,
+	ADV_HWC32ARGB	= 1 << 14,
+	ADV_HWC32ENABLE	= 1 << 13,
+	ADV_VSYNCOFFEN	= 1 << 12,
+	ADV_VSYNC_H_OFF	= 0xfff << 0,
+};
+
+enum {
+	CFG_565		= 0,
+	CFG_1555	= 1,
+	CFG_888PACK	= 2,
+	CFG_X888	= 3,
+	CFG_8888	= 4,
+	CFG_422PACK	= 5,
+	CFG_422		= 6,
+	CFG_420		= 7,
+	CFG_PSEUDO4	= 9,
+	CFG_PSEUDO8	= 10,
+	CFG_SWAPRB	= 1 << 4,
+	CFG_SWAPUV	= 1 << 3,
+	CFG_SWAPYU	= 1 << 2,
+	CFG_YUV2RGB	= 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+	CFG_NOBLENDING	= 1 << 31,
+	CFG_GAMMA_ENA	= 1 << 30,
+	CFG_CBSH_ENA	= 1 << 29,
+	CFG_PALETTE_ENA	= 1 << 28,
+	CFG_ARBFAST_ENA	= 1 << 27,
+	CFG_HWC_1BITMOD	= 1 << 26,
+	CFG_HWC_1BITENA	= 1 << 25,
+	CFG_HWC_ENA	= 1 << 24,
+	CFG_DMAFORMAT	= 0xf << 20,
+#define	CFG_DMA_FMT(x)	((x) << 20)
+	CFG_GRAFORMAT	= 0xf << 16,
+#define	CFG_GRA_FMT(x)	((x) << 16)
+#define CFG_GRA_MOD(x)	((x) << 8)
+	CFG_GRA_FTOGGLE	= 1 << 15,
+	CFG_GRA_HSMOOTH	= 1 << 14,
+	CFG_GRA_TSTMODE	= 1 << 13,
+	CFG_GRA_ENA	= 1 << 8,
+#define CFG_DMA_MOD(x)	((x) << 0)
+	CFG_DMA_FTOGGLE	= 1 << 7,
+	CFG_DMA_HSMOOTH	= 1 << 6,
+	CFG_DMA_TSTMODE	= 1 << 5,
+	CFG_DMA_ENA	= 1 << 0,
+};
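+
+/*
+ * Illustrative combination (not from the driver): enable the graphics
+ * DMA channel scanning out packed RGB888 with red/blue swapped:
+ *	CFG_GRA_ENA | CFG_GRA_FMT(CFG_888PACK) | CFG_GRA_MOD(CFG_SWAPRB)
+ */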
+
+enum {
+	CKMODE_DISABLE	= 0,
+	CKMODE_Y	= 1,
+	CKMODE_U	= 2,
+	CKMODE_RGB	= 3,
+	CKMODE_V	= 4,
+	CKMODE_R	= 5,
+	CKMODE_G	= 6,
+	CKMODE_B	= 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+	CFG_FRAME_TRIG		= 1 << 31,
+	CFG_VSYNC_INV		= 1 << 27,
+	CFG_CKMODE_MASK		= 0x7 << 24,
+#define CFG_CKMODE(x)		((x) << 24)
+	CFG_CARRY		= 1 << 23,
+	CFG_GATED_CLK		= 1 << 21,
+	CFG_PWRDN_ENA		= 1 << 20,
+	CFG_DSCALE_MASK		= 0x3 << 18,
+	CFG_DSCALE_NONE		= 0x0 << 18,
+	CFG_DSCALE_HALF		= 0x1 << 18,
+	CFG_DSCALE_QUAR		= 0x2 << 18,
+	CFG_ALPHAM_MASK		= 0x3 << 16,
+	CFG_ALPHAM_VIDEO	= 0x0 << 16,
+	CFG_ALPHAM_GRA		= 0x1 << 16,
+	CFG_ALPHAM_CFG		= 0x2 << 16,
+	CFG_ALPHA_MASK		= 0xff << 8,
+	CFG_PIXCMD_MASK		= 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+	SRAM_READ	= 0 << 14,
+	SRAM_WRITE	= 2 << 14,
+	SRAM_INIT	= 3 << 14,
+	SRAM_HWC32_RAM1	= 0xc << 8,
+	SRAM_HWC32_RAM2	= 0xd << 8,
+	SRAM_HWC32_RAMR	= SRAM_HWC32_RAM1,
+	SRAM_HWC32_RAMG	= SRAM_HWC32_RAM2,
+	SRAM_HWC32_RAMB	= 0xe << 8,
+	SRAM_HWC32_TRAN	= 0xf << 8,
+	SRAM_HWC	= 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+	CFG_CSB_256x32	= 1 << 15,	/* cursor */
+	CFG_CSB_256x24	= 1 << 14,	/* palette */
+	CFG_CSB_256x8	= 1 << 13,	/* gamma */
+	CFG_PDWN1920x32	= 1 << 8,	/* Armada 510: power down vscale ram */
+	CFG_PDWN256x32	= 1 << 7,	/* power down cursor */
+	CFG_PDWN256x24	= 1 << 6,	/* power down palette */
+	CFG_PDWN256x8	= 1 << 5,	/* power down gamma */
+	CFG_PDWNHWC	= 1 << 4,	/* Armada 510: power down all hwc ram */
+	CFG_PDWN32x32	= 1 << 3,	/* power down slave->smart ram */
+	CFG_PDWN16x66	= 1 << 2,	/* power down UV fifo */
+	CFG_PDWN32x66	= 1 << 1,	/* power down Y fifo */
+	CFG_PDWN64x66	= 1 << 0,	/* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+	/* Armada 510 */
+	SCLK_510_AXI		= 0x0 << 30,
+	SCLK_510_EXTCLK0	= 0x1 << 30,
+	SCLK_510_PLL		= 0x2 << 30,
+	SCLK_510_EXTCLK1	= 0x3 << 30,
+	SCLK_510_DIV_CHANGE	= 1 << 29,
+	SCLK_510_FRAC_DIV_MASK	= 0xfff << 16,
+	SCLK_510_INT_DIV_MASK	= 0xffff << 0,
+
+	/* Armada 16x */
+	SCLK_16X_AHB		= 0x0 << 28,
+	SCLK_16X_PCLK		= 0x1 << 28,
+	SCLK_16X_AXI		= 0x4 << 28,
+	SCLK_16X_PLL		= 0x8 << 28,
+	SCLK_16X_FRAC_DIV_MASK	= 0xfff << 16,
+	SCLK_16X_INT_DIV_MASK	= 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+	DUMB16_RGB565_0	= 0x0 << 28,
+	DUMB16_RGB565_1	= 0x1 << 28,
+	DUMB18_RGB666_0	= 0x2 << 28,
+	DUMB18_RGB666_1	= 0x3 << 28,
+	DUMB12_RGB444_0	= 0x4 << 28,
+	DUMB12_RGB444_1	= 0x5 << 28,
+	DUMB24_RGB888_0	= 0x6 << 28,
+	DUMB_BLANK	= 0x7 << 28,
+	DUMB_MASK	= 0xf << 28,
+	CFG_BIAS_OUT	= 1 << 8,
+	CFG_REV_RGB	= 1 << 7,
+	CFG_INV_CBLANK	= 1 << 6,
+	CFG_INV_CSYNC	= 1 << 5,	/* Normally active high */
+	CFG_INV_HENA	= 1 << 4,
+	CFG_INV_VSYNC	= 1 << 3,	/* Normally active high */
+	CFG_INV_HSYNC	= 1 << 2,	/* Normally active high */
+	CFG_INV_PCLK	= 1 << 1,
+	CFG_DUMB_ENA	= 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+	CFG_VSCALE_LN_EN	= 3 << 18,
+	CFG_GRA_VM_ENA		= 1 << 15,
+	CFG_DMA_VM_ENA		= 1 << 13,
+	CFG_CMD_VM_ENA		= 1 << 11,
+	CFG_CSC_MASK		= 3 << 8,
+	CFG_CSC_YUV_CCIR709	= 1 << 9,
+	CFG_CSC_YUV_CCIR601	= 0 << 9,
+	CFG_CSC_RGB_STUDIO	= 1 << 8,
+	CFG_CSC_RGB_COMPUTER	= 0 << 8,
+	CFG_IOPAD_MASK		= 0xf << 0,
+	CFG_IOPAD_DUMB24	= 0x0 << 0,
+	CFG_IOPAD_DUMB18SPI	= 0x1 << 0,
+	CFG_IOPAD_DUMB18GPIO	= 0x2 << 0,
+	CFG_IOPAD_DUMB16SPI	= 0x3 << 0,
+	CFG_IOPAD_DUMB16GPIO	= 0x4 << 0,
+	CFG_IOPAD_DUMB12GPIO	= 0x5 << 0,
+	CFG_IOPAD_SMART18	= 0x6 << 0,
+	CFG_IOPAD_SMART16	= 0x7 << 0,
+	CFG_IOPAD_SMART8	= 0x8 << 0,
+};
+
+#define IOPAD_DUMB24                0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+	DMA_FRAME_IRQ0_ENA	= 1 << 31,
+	DMA_FRAME_IRQ1_ENA	= 1 << 30,
+	DMA_FRAME_IRQ_ENA	= DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+	DMA_FF_UNDERFLOW_ENA	= 1 << 29,
+	GRA_FRAME_IRQ0_ENA	= 1 << 27,
+	GRA_FRAME_IRQ1_ENA	= 1 << 26,
+	GRA_FRAME_IRQ_ENA	= GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+	GRA_FF_UNDERFLOW_ENA	= 1 << 25,
+	VSYNC_IRQ_ENA		= 1 << 23,
+	DUMB_FRAMEDONE_ENA	= 1 << 22,
+	TWC_FRAMEDONE_ENA	= 1 << 21,
+	HWC_FRAMEDONE_ENA	= 1 << 20,
+	SLV_IRQ_ENA		= 1 << 19,
+	SPI_IRQ_ENA		= 1 << 18,
+	PWRDN_IRQ_ENA		= 1 << 17,
+	ERR_IRQ_ENA		= 1 << 16,
+	CLEAN_SPU_IRQ_ISR	= 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+	DMA_FRAME_IRQ0		= 1 << 31,
+	DMA_FRAME_IRQ1		= 1 << 30,
+	DMA_FRAME_IRQ		= DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+	DMA_FF_UNDERFLOW	= 1 << 29,
+	GRA_FRAME_IRQ0		= 1 << 27,
+	GRA_FRAME_IRQ1		= 1 << 26,
+	GRA_FRAME_IRQ		= GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+	GRA_FF_UNDERFLOW	= 1 << 25,
+	VSYNC_IRQ		= 1 << 23,
+	DUMB_FRAMEDONE		= 1 << 22,
+	TWC_FRAMEDONE		= 1 << 21,
+	HWC_FRAMEDONE		= 1 << 20,
+	SLV_IRQ			= 1 << 19,
+	SPI_IRQ			= 1 << 18,
+	PWRDN_IRQ		= 1 << 17,
+	ERR_IRQ			= 1 << 16,
+	DMA_FRAME_IRQ0_LEVEL	= 1 << 15,
+	DMA_FRAME_IRQ1_LEVEL	= 1 << 14,
+	DMA_FRAME_CNT_ISR	= 3 << 12,
+	GRA_FRAME_IRQ0_LEVEL	= 1 << 11,
+	GRA_FRAME_IRQ1_LEVEL	= 1 << 10,
+	GRA_FRAME_CNT_ISR	= 3 << 8,
+	VSYNC_IRQ_LEVEL		= 1 << 7,
+	DUMB_FRAMEDONE_LEVEL	= 1 << 6,
+	TWC_FRAMEDONE_LEVEL	= 1 << 5,
+	HWC_FRAMEDONE_LEVEL	= 1 << 4,
+	SLV_FF_EMPTY		= 1 << 3,
+	DMA_FF_ALLEMPTY		= 1 << 2,
+	GRA_FF_ALLEMPTY		= 1 << 1,
+	PWRDN_IRQ_LEVEL		= 1 << 0,
+};
+
+#endif

+ 18 - 0
drivers/gpu/drm/armada/armada_ioctlP.h

@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
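+
+/*
+ * ARMADA_IOCTL_PROTO(gem_create), for example, expands to:
+ *	extern int armada_gem_create_ioctl(struct drm_device *, void *,
+ *					   struct drm_file *);
+ */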
+
+#endif

+ 158 - 0
drivers/gpu/drm/armada/armada_output.c

@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+	struct drm_connector conn;
+	const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+	struct drm_encoder *enc = conn->encoder;
+
+	return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+	struct drm_connector *conn, bool force)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+	enum drm_connector_status status = connector_status_disconnected;
+
+	if (dconn->type->detect) {
+		status = dconn->type->detect(conn, force);
+	} else {
+		struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+		if (enc)
+			status = encoder_helper_funcs(enc)->detect(enc, conn);
+	}
+
+	return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+	drm_sysfs_connector_remove(conn);
+	drm_connector_cleanup(conn);
+	kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+	if (!dconn->type->set_property)
+		return -EINVAL;
+
+	return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+	.dpms		= drm_helper_connector_dpms,
+	.fill_modes	= drm_helper_probe_single_connector_modes,
+	.detect		= armada_drm_connector_detect,
+	.destroy	= armada_drm_connector_destroy,
+	.set_property	= armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+	const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+	struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+	int valid = MODE_BAD;
+
+	if (encoder) {
+		struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+		valid = slave->slave_funcs->mode_valid(encoder, mode);
+	}
+	return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value)
+{
+	struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+	int rc = -EINVAL;
+
+	if (encoder) {
+		struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+		rc = slave->slave_funcs->set_property(encoder, conn, property,
+						      value);
+	}
+	return rc;
+}
+
+int armada_output_create(struct drm_device *dev,
+	const struct armada_output_type *type, const void *data)
+{
+	struct armada_connector *dconn;
+	int ret;
+
+	dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+	if (!dconn)
+		return -ENOMEM;
+
+	dconn->type = type;
+
+	ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+				 type->connector_type);
+	if (ret) {
+		DRM_ERROR("unable to init connector\n");
+		goto err_destroy_dconn;
+	}
+
+	ret = type->create(&dconn->conn, data);
+	if (ret)
+		goto err_conn;
+
+	ret = drm_sysfs_connector_add(&dconn->conn);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+ err_sysfs:
+	if (dconn->conn.encoder)
+		dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+	drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+	kfree(dconn);
+	return ret;
+}
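
armada_output_create() is the single entry point that ties a connector to an armada_output_type descriptor: it allocates the connector, initialises it with the common funcs above, then defers encoder setup to the type's create() hook. A minimal sketch of a hypothetical consumer (the slave encoder in armada_slave.c below is the real in-tree user; the example_vga_* names are illustrative only):

static int example_vga_create(struct drm_connector *conn, const void *data)
{
	/* create/attach an encoder and set conn->encoder here */
	return 0;
}

static const struct armada_output_type example_vga_output = {
	.connector_type	= DRM_MODE_CONNECTOR_VGA,
	.create		= example_vga_create,
};

/* at driver load time: */
/*	err = armada_output_create(dev, &example_vga_output, NULL); */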

+ 39 - 0
drivers/gpu/drm/armada/armada_output.h

@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+	((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
+struct armada_output_type {
+	int connector_type;
+	enum drm_connector_status (*detect)(struct drm_connector *, bool);
+	int (*create)(struct drm_connector *, const void *);
+	int (*set_property)(struct drm_connector *, struct drm_property *,
+			    uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+	const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+	struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+	const struct armada_output_type *type, const void *data);
+
+#endif

+ 477 - 0
drivers/gpu/drm/armada/armada_overlay.c

@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+	uint32_t colorkey_yr;
+	uint32_t colorkey_ug;
+	uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+	int16_t  brightness;
+	uint16_t contrast;
+	uint16_t saturation;
+	uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+	struct drm_plane base;
+	spinlock_t lock;
+	struct drm_framebuffer *old_fb;
+	uint32_t src_hw;
+	uint32_t dst_hw;
+	uint32_t dst_yx;
+	uint32_t ctrl0;
+	struct {
+		struct armada_vbl_event update;
+		struct armada_regs regs[13];
+		wait_queue_head_t wait;
+	} vbl;
+	struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+	struct armada_crtc *dcrtc)
+{
+	writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+	writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+	writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+	writel_relaxed(prop->brightness << 16 | prop->contrast,
+		       dcrtc->base + LCD_SPU_CONTRAST);
+	/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+	writel_relaxed(prop->saturation << 16,
+		       dcrtc->base + LCD_SPU_SATURATION);
+	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+	spin_lock_irq(&dcrtc->irq_lock);
+	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+		     CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+		     dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+	spin_unlock_irq(&dcrtc->irq_lock);
+}
+
+/* === Plane support === */
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+	struct armada_plane *dplane = data;
+	struct drm_framebuffer *fb;
+
+	armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+	spin_lock(&dplane->lock);
+	fb = dplane->old_fb;
+	dplane->old_fb = NULL;
+	spin_unlock(&dplane->lock);
+
+	if (fb)
+		armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+	int end = start + size;
+	if (end < 0)
+		return 0;
+	if (start < 0)
+		start = 0;
+	return (unsigned)end > max ? max - start : end - start;
+}
+
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+	struct drm_framebuffer *fb,
+	int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+	uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	uint32_t val, ctrl0;
+	unsigned idx = 0;
+	int ret;
+
+	crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+	crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+	ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+		CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+		CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+	/* Does the position/size result in nothing to display? */
+	if (crtc_w == 0 || crtc_h == 0) {
+		ctrl0 &= ~CFG_DMA_ENA;
+	}
+
+	/*
+	 * FIXME: if the starting point is off screen, we need to
+	 * adjust src_x, src_y, src_w, src_h appropriately, and
+	 * according to the scale.
+	 */
+
+	if (!dcrtc->plane) {
+		dcrtc->plane = plane;
+		armada_ovl_update_attr(&dplane->prop, dcrtc);
+	}
+
+	/* FIXME: overlay on an interlaced display */
+	/* Just updating the position/size? */
+	if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+		val = (src_h & 0xffff0000) | src_w >> 16;
+		dplane->src_hw = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+		val = crtc_h << 16 | crtc_w;
+		dplane->dst_hw = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+		val = crtc_y << 16 | crtc_x;
+		dplane->dst_yx = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+		return 0;
+	} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+		/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+		armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+			       dcrtc->base + LCD_SPU_SRAM_PARA1);
+	}
+
+	ret = wait_event_timeout(dplane->vbl.wait,
+				 list_empty(&dplane->vbl.update.node),
+				 HZ/25);
+	if (ret < 0)
+		return ret;
+
+	if (plane->fb != fb) {
+		struct armada_gem_object *obj = drm_fb_obj(fb);
+		uint32_t sy, su, sv;
+
+		/*
+		 * Take a reference on the new framebuffer - we want to
+		 * hold on to it while the hardware is displaying it.
+		 */
+		drm_framebuffer_reference(fb);
+
+		if (plane->fb) {
+			struct drm_framebuffer *older_fb;
+
+			spin_lock_irq(&dplane->lock);
+			older_fb = dplane->old_fb;
+			dplane->old_fb = plane->fb;
+			spin_unlock_irq(&dplane->lock);
+			if (older_fb)
+				armada_drm_queue_unref_work(dcrtc->crtc.dev,
+							    older_fb);
+		}
+
+		src_y >>= 16;
+		src_x >>= 16;
+		sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+			src_x * fb->bits_per_pixel / 8;
+		su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+			src_x;
+		sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+			src_x;
+
+		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+				     LCD_SPU_DMA_START_ADDR_Y0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+				     LCD_SPU_DMA_START_ADDR_U0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+				     LCD_SPU_DMA_START_ADDR_V0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+				     LCD_SPU_DMA_START_ADDR_Y1);
+		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+				     LCD_SPU_DMA_START_ADDR_U1);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+				     LCD_SPU_DMA_START_ADDR_V1);
+
+		val = fb->pitches[0] << 16 | fb->pitches[0];
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_PITCH_YC);
+		val = fb->pitches[1] << 16 | fb->pitches[2];
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_PITCH_UV);
+	}
+
+	val = (src_h & 0xffff0000) | src_w >> 16;
+	if (dplane->src_hw != val) {
+		dplane->src_hw = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_HPXL_VLN);
+	}
+	val = crtc_h << 16 | crtc_w;
+	if (dplane->dst_hw != val) {
+		dplane->dst_hw = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DZM_HPXL_VLN);
+	}
+	val = crtc_y << 16 | crtc_x;
+	if (dplane->dst_yx != val) {
+		dplane->dst_yx = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_OVSA_HPXL_VLN);
+	}
+	if (dplane->ctrl0 != ctrl0) {
+		dplane->ctrl0 = ctrl0;
+		armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+			CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+			CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+			CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+			CFG_YUV2RGB) | CFG_DMA_ENA,
+			LCD_SPU_DMA_CTRL0);
+	}
+	if (idx) {
+		armada_reg_queue_end(dplane->vbl.regs, idx);
+		armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+	}
+	return 0;
+}
+
+static int armada_plane_disable(struct drm_plane *plane)
+{
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	struct drm_framebuffer *fb;
+	struct armada_crtc *dcrtc;
+
+	if (!dplane->base.crtc)
+		return 0;
+
+	dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+	dcrtc->plane = NULL;
+
+	spin_lock_irq(&dcrtc->irq_lock);
+	armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+	armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+	dplane->ctrl0 = 0;
+	spin_unlock_irq(&dcrtc->irq_lock);
+
+	/* Power down the Y/U/V FIFOs */
+	armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+		       dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+	if (plane->fb)
+		drm_framebuffer_unreference(plane->fb);
+
+	spin_lock_irq(&dplane->lock);
+	fb = dplane->old_fb;
+	dplane->old_fb = NULL;
+	spin_unlock_irq(&dplane->lock);
+	if (fb)
+		drm_framebuffer_unreference(fb);
+
+	return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+	kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+	struct drm_property *property, uint64_t val)
+{
+	struct armada_private *priv = plane->dev->dev_private;
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	bool update_attr = false;
+
+	if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+		dplane->prop.colorkey_yr = CCC(K2R(val));
+		dplane->prop.colorkey_ug = CCC(K2G(val));
+		dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+		update_attr = true;
+	} else if (property == priv->colorkey_min_prop) {
+		dplane->prop.colorkey_yr &= ~0x00ff0000;
+		dplane->prop.colorkey_yr |= K2R(val) << 16;
+		dplane->prop.colorkey_ug &= ~0x00ff0000;
+		dplane->prop.colorkey_ug |= K2G(val) << 16;
+		dplane->prop.colorkey_vb &= ~0x00ff0000;
+		dplane->prop.colorkey_vb |= K2B(val) << 16;
+		update_attr = true;
+	} else if (property == priv->colorkey_max_prop) {
+		dplane->prop.colorkey_yr &= ~0xff000000;
+		dplane->prop.colorkey_yr |= K2R(val) << 24;
+		dplane->prop.colorkey_ug &= ~0xff000000;
+		dplane->prop.colorkey_ug |= K2G(val) << 24;
+		dplane->prop.colorkey_vb &= ~0xff000000;
+		dplane->prop.colorkey_vb |= K2B(val) << 24;
+		update_attr = true;
+	} else if (property == priv->colorkey_val_prop) {
+		dplane->prop.colorkey_yr &= ~0x0000ff00;
+		dplane->prop.colorkey_yr |= K2R(val) << 8;
+		dplane->prop.colorkey_ug &= ~0x0000ff00;
+		dplane->prop.colorkey_ug |= K2G(val) << 8;
+		dplane->prop.colorkey_vb &= ~0x0000ff00;
+		dplane->prop.colorkey_vb |= K2B(val) << 8;
+		update_attr = true;
+	} else if (property == priv->colorkey_alpha_prop) {
+		dplane->prop.colorkey_yr &= ~0x000000ff;
+		dplane->prop.colorkey_yr |= K2R(val);
+		dplane->prop.colorkey_ug &= ~0x000000ff;
+		dplane->prop.colorkey_ug |= K2G(val);
+		dplane->prop.colorkey_vb &= ~0x000000ff;
+		dplane->prop.colorkey_vb |= K2B(val);
+		update_attr = true;
+	} else if (property == priv->colorkey_mode_prop) {
+		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+		update_attr = true;
+	} else if (property == priv->brightness_prop) {
+		dplane->prop.brightness = val - 256;
+		update_attr = true;
+	} else if (property == priv->contrast_prop) {
+		dplane->prop.contrast = val;
+		update_attr = true;
+	} else if (property == priv->saturation_prop) {
+		dplane->prop.saturation = val;
+		update_attr = true;
+	}
+
+	if (update_attr && dplane->base.crtc)
+		armada_ovl_update_attr(&dplane->prop,
+				       drm_to_armada_crtc(dplane->base.crtc));
+
+	return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+	.update_plane	= armada_plane_update,
+	.disable_plane	= armada_plane_disable,
+	.destroy	= armada_plane_destroy,
+	.set_property	= armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YUV420,
+	DRM_FORMAT_YVU420,
+	DRM_FORMAT_YUV422,
+	DRM_FORMAT_YVU422,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_ABGR1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+	{ CKMODE_DISABLE, "disabled" },
+	{ CKMODE_Y,       "Y component" },
+	{ CKMODE_U,       "U component" },
+	{ CKMODE_V,       "V component" },
+	{ CKMODE_RGB,     "RGB" },
+	{ CKMODE_R,       "R component" },
+	{ CKMODE_G,       "G component" },
+	{ CKMODE_B,       "B component" },
+};
+
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	if (priv->colorkey_prop)
+		return 0;
+
+	priv->colorkey_prop = drm_property_create_range(dev, 0,
+				"colorkey", 0, 0xffffff);
+	priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+				"colorkey_min", 0, 0xffffff);
+	priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+				"colorkey_max", 0, 0xffffff);
+	priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+				"colorkey_val", 0, 0xffffff);
+	priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+				"colorkey_alpha", 0, 0xffffff);
+	priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+				"colorkey_mode",
+				armada_drm_colorkey_enum_list,
+				ARRAY_SIZE(armada_drm_colorkey_enum_list));
+	priv->brightness_prop = drm_property_create_range(dev, 0,
+				"brightness", 0, 256 + 255);
+	priv->contrast_prop = drm_property_create_range(dev, 0,
+				"contrast", 0, 0x7fff);
+	priv->saturation_prop = drm_property_create_range(dev, 0,
+				"saturation", 0, 0x7fff);
+
+	if (!priv->colorkey_prop)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_mode_object *mobj;
+	struct armada_plane *dplane;
+	int ret;
+
+	ret = armada_overlay_create_properties(dev);
+	if (ret)
+		return ret;
+
+	dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+	if (!dplane)
+		return -ENOMEM;
+
+	spin_lock_init(&dplane->lock);
+	init_waitqueue_head(&dplane->vbl.wait);
+	armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+				  dplane);
+
+	drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+		       armada_formats, ARRAY_SIZE(armada_formats), false);
+
+	dplane->prop.colorkey_yr = 0xfefefe00;
+	dplane->prop.colorkey_ug = 0x01010100;
+	dplane->prop.colorkey_vb = 0x01010100;
+	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+	dplane->prop.brightness = 0;
+	dplane->prop.contrast = 0x4000;
+	dplane->prop.saturation = 0x4000;
+
+	mobj = &dplane->base.base;
+	drm_object_attach_property(mobj, priv->colorkey_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_min_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_max_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_val_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+				   0x000000);
+	drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+				   CKMODE_RGB);
+	drm_object_attach_property(mobj, priv->brightness_prop, 256);
+	drm_object_attach_property(mobj, priv->contrast_prop,
+				   dplane->prop.contrast);
+	drm_object_attach_property(mobj, priv->saturation_prop,
+				   dplane->prop.saturation);
+
+	return 0;
+}
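
The colorkey properties above take a packed 24-bit value whose low byte feeds K2R(), so the layout is effectively 0xBBGGRR; CCC() then replicates one channel into bits 31:8 of the per-channel register. A standalone sketch of that arithmetic, reproducing the defaults attached above (the uint32_t cast is added only to keep the shifts well-defined in userspace C):

#include <stdint.h>
#include <stdio.h>

#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
#define K2B(val) (((val) >> 16) & 0xff)
#define CCC(v)   ((uint32_t)(v) << 24 | (uint32_t)(v) << 16 | (uint32_t)(v) << 8)

int main(void)
{
	uint32_t val = 0x0101fe;	/* default "colorkey" value */

	printf("colorkey_yr = 0x%08x\n", CCC(K2R(val)));	/* 0xfefefe00 */
	printf("colorkey_ug = 0x%08x\n", CCC(K2G(val)));	/* 0x01010100 */
	printf("colorkey_vb = 0x%08x\n", CCC(K2B(val)));	/* 0x01010100 */
	return 0;
}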

+ 139 - 0
drivers/gpu/drm/armada/armada_slave.c

@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+	struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+	int count = 0;
+
+	if (enc) {
+		struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+		count = slave->slave_funcs->get_modes(enc, conn);
+	}
+
+	return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+	struct drm_encoder_slave *slave = to_encoder_slave(enc);
+	struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+	if (slave->slave_funcs)
+		slave->slave_funcs->destroy(enc);
+	if (client)
+		i2c_put_adapter(client->adapter);
+
+	drm_encoder_cleanup(&slave->base);
+	kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+	.destroy	= armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+	.get_modes	= armada_drm_slave_get_modes,
+	.mode_valid	= armada_drm_slave_encoder_mode_valid,
+	.best_encoder	= armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+	.dpms = drm_i2c_encoder_dpms,
+	.save = drm_i2c_encoder_save,
+	.restore = drm_i2c_encoder_restore,
+	.mode_fixup = drm_i2c_encoder_mode_fixup,
+	.prepare = drm_i2c_encoder_prepare,
+	.commit = drm_i2c_encoder_commit,
+	.mode_set = drm_i2c_encoder_mode_set,
+	.detect = drm_i2c_encoder_detect,
+};
+
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+	const struct armada_drm_slave_config *config = data;
+	struct drm_encoder_slave *slave;
+	struct i2c_adapter *adap;
+	int ret;
+
+	conn->interlace_allowed = config->interlace_allowed;
+	conn->doublescan_allowed = config->doublescan_allowed;
+	conn->polled = config->polled;
+
+	drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return -ENOMEM;
+
+	slave->base.possible_crtcs = config->crtcs;
+
+	adap = i2c_get_adapter(config->i2c_adapter_id);
+	if (!adap) {
+		kfree(slave);
+		return -EPROBE_DEFER;
+	}
+
+	ret = drm_encoder_init(conn->dev, &slave->base,
+			       &armada_drm_slave_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS);
+	if (ret) {
+		DRM_ERROR("unable to init encoder\n");
+		i2c_put_adapter(adap);
+		kfree(slave);
+		return ret;
+	}
+
+	ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+	i2c_put_adapter(adap);
+	if (ret) {
+		DRM_ERROR("unable to init encoder slave\n");
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+	ret = slave->slave_funcs->create_resources(&slave->base, conn);
+	if (ret) {
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+	if (ret) {
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	conn->encoder = &slave->base;
+
+	return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+	.connector_type	= DRM_MODE_CONNECTOR_HDMIA,
+	.create		= armada_drm_conn_slave_create,
+	.set_property	= armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+	const struct armada_drm_slave_config *config)
+{
+	return armada_output_create(dev, &armada_drm_conn_slave, config);
+}

+ 26 - 0
drivers/gpu/drm/armada/armada_slave.h

@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+	int i2c_adapter_id;
+	uint32_t crtcs;
+	uint8_t polled;
+	bool interlace_allowed;
+	bool doublescan_allowed;
+	struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+	const struct armada_drm_slave_config *);
+
+#endif
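
The config is expected to come from board/platform code. A sketch of what a caller might pass, assuming a TDA998x-style HDMI transmitter on i2c bus 0 (the adapter number, chip name and address are illustrative, not from this commit):

static const struct armada_drm_slave_config example_hdmi_config = {
	.i2c_adapter_id		= 0,		/* illustrative */
	.crtcs			= 1 << 0,	/* CRTC 0 only */
	.polled			= DRM_CONNECTOR_POLL_CONNECT |
				  DRM_CONNECTOR_POLL_DISCONNECT,
	.interlace_allowed	= true,
	.info			= {
		.type	= "tda998x",		/* illustrative */
		.addr	= 0x70,
	},
};

/*	err = armada_drm_connector_slave_create(dev, &example_hdmi_config); */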

+ 1 - 0
drivers/gpu/drm/ast/Kconfig

@@ -6,6 +6,7 @@ config DRM_AST
 	select FB_SYS_FILLRECT
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 Say yes for experimental AST GPU driver. Do not enable

+ 0 - 1
drivers/gpu/drm/ast/ast_drv.c

@@ -211,7 +211,6 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = ast_gem_init_object,
 	.gem_free_object = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,

+ 0 - 1
drivers/gpu/drm/ast/ast_drv.h

@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
 			   struct drm_device *dev,
 			   struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
 				struct drm_device *dev,

+ 0 - 6
drivers/gpu/drm/ast/ast_main.c

@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
 	struct ttm_buffer_object *tbo;

+ 1 - 0
drivers/gpu/drm/cirrus/Kconfig

@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 This is a KMS driver for emulated cirrus device in qemu.

+ 0 - 1
drivers/gpu/drm/cirrus/cirrus_drv.c

@@ -97,7 +97,6 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	.gem_init_object = cirrus_gem_init_object,
 	.gem_free_object = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,

+ 0 - 1
drivers/gpu/drm/cirrus/cirrus_drv.h

@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
 		      struct pci_dev *pdev,
 		      uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
 			    struct drm_device *dev,

+ 0 - 6
drivers/gpu/drm/cirrus/cirrus_main.c

@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
 	return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 	struct ttm_buffer_object *tbo;

+ 5 - 6
drivers/gpu/drm/cirrus/cirrus_mode.c

@@ -494,13 +494,12 @@ static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
 
 int cirrus_vga_get_modes(struct drm_connector *connector)
 {
-	/* Just add a static list of modes */
-	drm_add_modes_noedid(connector, 640, 480);
-	drm_add_modes_noedid(connector, 800, 600);
-	drm_add_modes_noedid(connector, 1024, 768);
-	drm_add_modes_noedid(connector, 1280, 1024);
+	int count;
 
-	return 4;
+	/* Just add a static list of modes */
+	count = drm_add_modes_noedid(connector, 1280, 1024);
+	drm_set_preferred_mode(connector, 1024, 768);
+	return count;
 }
 
 static int cirrus_vga_mode_valid(struct drm_connector *connector,
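
The rewritten cirrus_vga_get_modes() leans on two helper behaviours: drm_add_modes_noedid() adds all standard modes up to the given size and returns how many it added, and drm_set_preferred_mode() marks a matching mode DRM_MODE_TYPE_PREFERRED. A minimal sketch of the same pattern for any EDID-less connector, under those assumptions:

static int example_get_modes(struct drm_connector *connector)
{
	int count;

	/* add every standard mode up to 1920x1080, report the count */
	count = drm_add_modes_noedid(connector, 1920, 1080);
	/* and hint userspace towards 720p */
	drm_set_preferred_mode(connector, 1280, 720);
	return count;
}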

+ 0 - 2
drivers/gpu/drm/drm_context.c

@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist);
-	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 			if (pos->handle == ctx->handle) {
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}

+ 98 - 55
drivers/gpu/drm/drm_crtc.c

@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1301,7 +1303,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
 }
 
 /**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
  * @out: drm_display_mode to return to the user
  * @in: drm_mode_modeinfo to use
  *
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
 		return -ERANGE;
 
+	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+		return -EINVAL;
+
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
 	out->hsync_start = in->hsync_start;
@@ -1552,7 +1557,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
 	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1579,6 +1584,19 @@ out:
 	return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1623,7 +1641,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	obj = drm_mode_object_find(dev, out_resp->connector_id,
 				   DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	connector = obj_to_connector(obj);
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		mode_count++;
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
 
 	out_resp->connector_id = connector->base.id;
 	out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
@@ -1735,7 +1757,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
 	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
 				   DRM_MODE_OBJECT_ENCODER);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	encoder = obj_to_encoder(obj);
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+				   int x, int y,
+				   const struct drm_display_mode *mode,
+				   const struct drm_framebuffer *fb)
+
+{
+	int hdisplay, vdisplay;
+
+	hdisplay = mode->hdisplay;
+	vdisplay = mode->vdisplay;
+
+	if (drm_mode_is_stereo(mode)) {
+		struct drm_display_mode adjusted = *mode;
+
+		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+		hdisplay = adjusted.crtc_hdisplay;
+		vdisplay = adjusted.crtc_vdisplay;
+	}
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    x > fb->width - hdisplay ||
+	    y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, x, y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
@@ -2080,14 +2141,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
-		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -2104,7 +2164,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			if (!fb) {
 				DRM_DEBUG_KMS("Unknown FB ID%d\n",
 						crtc_req->fb_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 		}
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		hdisplay = mode->hdisplay;
-		vdisplay = mode->vdisplay;
-
-		if (crtc->invert_dimensions)
-			swap(hdisplay, vdisplay);
-
-		if (hdisplay > fb->width ||
-		    vdisplay > fb->height ||
-		    crtc_req->x > fb->width - hdisplay ||
-		    crtc_req->y > fb->height - vdisplay) {
-			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-				      fb->width, fb->height,
-				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-				      crtc->invert_dimensions ? " (inverted)" : "");
-			ret = -ENOSPC;
+		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+					      mode, fb);
+		if (ret)
 			goto out;
-		}
+
 	}
 
 	if (crtc_req->count_connectors == 0 && mode) {
@@ -2184,7 +2232,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 			if (!obj) {
 				DRM_DEBUG_KMS("Connector id %d unknown\n",
 						out_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 			connector = obj_to_connector(obj);
@@ -2232,7 +2280,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
 	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 
@@ -2441,6 +2489,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
 	case DRM_FORMAT_YVU444:
 		return 0;
 	default:
+		DRM_DEBUG_KMS("invalid pixel format %s\n",
+			      drm_get_format_name(r->pixel_format));
 		return -EINVAL;
 	}
 }
@@ -2606,7 +2656,7 @@ fail_lookup:
 	mutex_unlock(&dev->mode_config.fb_lock);
 	mutex_unlock(&file_priv->fbs_lock);
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 /**
@@ -2634,7 +2684,7 @@ int drm_mode_getfb(struct drm_device *dev,
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	r->height = fb->height;
 	r->width = fb->width;
@@ -2679,7 +2729,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	num_clips = r->num_clips;
 	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -3011,7 +3061,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	property = obj_to_property(obj);
@@ -3140,7 +3190,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	blob = obj_to_blob(obj);
@@ -3301,7 +3351,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
 
 	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	if (!obj->properties) {
@@ -3354,8 +3404,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 
 	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!arg_obj)
+	if (!arg_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	if (!arg_obj->properties)
 		goto out;
 
@@ -3368,8 +3420,10 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
 
 	prop_obj = drm_mode_object_find(dev, arg->prop_id,
 					DRM_MODE_OBJECT_PROPERTY);
-	if (!prop_obj)
+	if (!prop_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	property = obj_to_property(prop_obj);
 
 	if (!drm_property_change_is_valid(property, arg->value))
@@ -3454,7 +3508,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3513,7 +3567,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3556,7 +3610,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
-	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3568,7 +3621,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 
 	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj)
-		return -EINVAL;
+		return -ENOENT;
 	crtc = obj_to_crtc(obj);
 
 	mutex_lock(&crtc->mutex);
@@ -3585,25 +3638,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 
 	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-	if (!fb)
+	if (!fb) {
+		ret = -ENOENT;
 		goto out;
+	}
 
-	hdisplay = crtc->mode.hdisplay;
-	vdisplay = crtc->mode.vdisplay;
-
-	if (crtc->invert_dimensions)
-		swap(hdisplay, vdisplay);
-
-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		ret = -ENOSPC;
+	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (ret)
 		goto out;
-	}
 
 	if (crtc->fb->pixel_format != fb->pixel_format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
@@ -3788,7 +3830,8 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
 		*bpp = 32;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format\n");
+		DRM_DEBUG_KMS("unsupported pixel format %s\n",
+			      drm_get_format_name(format));
 		*depth = 0;
 		*bpp = 0;
 		break;
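
The stereo_allowed flag checked in drm_mode_expose_to_userspace() is the per-file opt-in set through the new client capability ioctl mentioned in the merge summary. From userspace the opt-in would look roughly like the sketch below via libdrm's drmSetClientCap(); that DRM_CLIENT_CAP_STEREO_3D is already exposed by the contemporaneous libdrm headers is an assumption:

#include <xf86drm.h>

/* After this succeeds, GETCONNECTOR also reports stereo 3D modes. */
static int enable_stereo_modes(int fd)
{
	return drmSetClientCap(fd, DRM_CLIENT_CAP_STEREO_3D, 1);
}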

+ 49 - 47
drivers/gpu/drm/drm_crtc_helper.c

@@ -39,6 +39,10 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  * 						connector list
@@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 {
 	struct drm_display_mode *mode;
 
-	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+		      DRM_MODE_FLAG_3D_MASK))
 		return;
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
 				!(flags & DRM_MODE_FLAG_DBLSCAN))
 			mode->status = MODE_NO_DBLESCAN;
+		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+				!(flags & DRM_MODE_FLAG_3D_MASK))
+			mode->status = MODE_NO_STEREO;
 	}
 
 	return;
@@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be use as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		mode_flags |= DRM_MODE_FLAG_INTERLACE;
 	if (connector->doublescan_allowed)
 		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+	if (connector->stereo_allowed)
+		mode_flags |= DRM_MODE_FLAG_3D_MASK;
 	drm_mode_validate_flag(connector, mode_flags);
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -395,22 +405,25 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_display_mode *adjusted_mode, saved_mode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
+	bool saved_enabled;
 	struct drm_encoder *encoder;
 	bool ret = true;
 
+	saved_enabled = crtc->enabled;
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
 	if (!crtc->enabled)
 		return true;
 
 	adjusted_mode = drm_mode_duplicate(dev, mode);
-	if (!adjusted_mode)
+	if (!adjusted_mode) {
+		crtc->enabled = saved_enabled;
 		return false;
+	}
 
-	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -529,7 +542,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 done:
 	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
-		crtc->hwmode = saved_hwmode;
+		crtc->enabled = saved_enabled;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
@@ -557,6 +570,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 				continue;
 
 			connector->encoder = NULL;
+
+			/*
+			 * drm_helper_disable_unused_functions() ought to be
+			 * doing this, but since we've decoupled the encoder
+			 * from the connector above, the required connection
+			 * between them is henceforth no longer available.
+			 */
+			connector->dpms = DRM_MODE_DPMS_OFF;
 		}
 	}
 
@@ -583,9 +604,8 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
-	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+	struct drm_crtc *new_crtc;
 	struct drm_encoder *save_encoders, *new_encoder, *encoder;
-	struct drm_framebuffer *old_fb = NULL;
 	bool mode_changed = false; /* if true do a full mode set */
 	bool fb_changed = false; /* if true and !mode_changed just do a flip */
 	struct drm_connector *save_connectors, *connector;
@@ -621,37 +641,27 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
 	dev = set->crtc->dev;
 
-	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
-	 * connector data. */
-	save_crtcs = kzalloc(dev->mode_config.num_crtc *
-			     sizeof(struct drm_crtc), GFP_KERNEL);
-	if (!save_crtcs)
-		return -ENOMEM;
-
+	/*
+	 * Allocate space for the backup of all (non-pointer) encoder and
+	 * connector data.
+	 */
 	save_encoders = kzalloc(dev->mode_config.num_encoder *
 				sizeof(struct drm_encoder), GFP_KERNEL);
-	if (!save_encoders) {
-		kfree(save_crtcs);
+	if (!save_encoders)
 		return -ENOMEM;
-	}
 
 	save_connectors = kzalloc(dev->mode_config.num_connector *
 				sizeof(struct drm_connector), GFP_KERNEL);
 	if (!save_connectors) {
-		kfree(save_crtcs);
 		kfree(save_encoders);
 		return -ENOMEM;
 	}
 
-	/* Copy data. Note that driver private data is not affected.
+	/*
+	 * Copy data. Note that driver private data is not affected.
 	 * Should anything bad happen only the expected state is
 	 * restored, not the drivers personal bookkeeping.
 	 */
-	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		save_crtcs[count++] = *crtc;
-	}
-
 	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		save_encoders[count++] = *encoder;
@@ -775,19 +785,17 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
-		if (set->crtc->enabled) {
+		if (drm_helper_crtc_in_use(set->crtc)) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
-			old_fb = set->crtc->fb;
 			set->crtc->fb = set->fb;
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
-						      old_fb)) {
+						      save_set.fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
-				set->crtc->fb = old_fb;
+				set->crtc->fb = save_set.fb;
 				ret = -EINVAL;
 				goto fail;
 			}
@@ -802,30 +810,23 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	} else if (fb_changed) {
 		set->crtc->x = set->x;
 		set->crtc->y = set->y;
-
-		old_fb = set->crtc->fb;
-		if (set->crtc->fb != set->fb)
-			set->crtc->fb = set->fb;
+		set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
-						set->x, set->y, old_fb);
+						set->x, set->y, save_set.fb);
 		if (ret != 0) {
-			set->crtc->fb = old_fb;
+			set->crtc->x = save_set.x;
+			set->crtc->y = save_set.y;
+			set->crtc->fb = save_set.fb;
 			goto fail;
 		}
 	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return 0;
 
 fail:
 	/* Restore all previous data. */
-	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		*crtc = save_crtcs[count++];
-	}
-
 	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		*encoder = save_encoders[count++];
@@ -844,7 +845,6 @@ fail:
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1125,14 +1125,14 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
-void drm_helper_hpd_irq_event(struct drm_device *dev)
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	enum drm_connector_status old_status;
 	bool changed = false;
 
 	if (!dev->mode_config.poll_enabled)
-		return;
+		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1157,5 +1157,7 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 
 	if (changed)
 		drm_kms_helper_hotplug_event(dev);
+
+	return changed;
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
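
With drm_helper_hpd_irq_event() now returning whether any connector actually changed state, a driver's threaded HPD handler can cheaply distinguish real hotplugs from spurious interrupts. A minimal sketch, assuming <linux/interrupt.h> (the example_* names are illustrative):

static irqreturn_t example_hpd_thread(int irq, void *arg)
{
	struct drm_device *dev = arg;

	if (drm_helper_hpd_irq_event(dev)) {
		/* a connector really appeared or disappeared; re-probe, notify */
	}
	return IRQ_HANDLED;
}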

+ 3 - 3
drivers/gpu/drm/drm_debugfs.c

@@ -42,7 +42,7 @@
  * Initialization, etc.
  **************************************************/
 
-static struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_info_list drm_debugfs_list[] = {
 	{"name", drm_name_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
@@ -84,7 +84,7 @@ static const struct file_operations drm_debugfs_fops = {
  * Create a given set of debugfs files represented by an array of
  * Create a given set of debugfs files represented by an array of
  * gdm_debugfs_lists in the given root directory.
  * gdm_debugfs_lists in the given root directory.
  */
  */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
+int drm_debugfs_create_files(const struct drm_info_list *files, int count,
 			     struct dentry *root, struct drm_minor *minor)
 			     struct dentry *root, struct drm_minor *minor)
 {
 {
 	struct drm_device *dev = minor->dev;
 	struct drm_device *dev = minor->dev;
@@ -188,7 +188,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
  *
  *
  * Remove all debugfs entries created by debugfs_init().
  * Remove all debugfs entries created by debugfs_init().
  */
  */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
 			     struct drm_minor *minor)
 			     struct drm_minor *minor)
 {
 {
 	struct list_head *pos, *q;
 	struct list_head *pos, *q;

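With drm_debugfs_create_files() and drm_debugfs_remove_files() now taking const pointers, a driver's info list can live in rodata. A sketch of the resulting calling convention (the foo_* names are hypothetical):

	static const struct drm_info_list foo_debugfs_list[] = {
		/* foo_state_show: assumed to be a drm_info_t callback */
		{ "foo_state", foo_state_show, 0 },
	};

	static int foo_debugfs_init(struct drm_minor *minor)
	{
		return drm_debugfs_create_files(foo_debugfs_list,
						ARRAY_SIZE(foo_debugfs_list),
						minor->debugfs_root, minor);
	}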
+ 8 - 8
drivers/gpu/drm/drm_dp_helper.c

@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 			     int lane)
 {
 	int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
 	return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count)
 {
 	u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count)
 {
 	int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(100);
 	else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(400);
 	else

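Constifying the link-status helpers lets callers pass a read-only DPCD status dump straight through. A sketch of a typical link-training check built on the two predicates above (the wrapper itself is hypothetical):

	static bool foo_link_fully_trained(const u8 status[DP_LINK_STATUS_SIZE],
					   int lane_count)
	{
		/* Training is complete once clock recovery and channel
		 * equalization both pass on every active lane. */
		return drm_dp_clock_recovery_ok(status, lane_count) &&
		       drm_dp_channel_eq_ok(status, lane_count);
	}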
+ 2 - 72
drivers/gpu/drm/drm_drv.c

@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose)
-		dev->driver->lastclose(dev);
-	DRM_DEBUG("driver lastclose completed\n");
-
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-
-	/* Clear vma list (only built for debugging) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-
-	drm_legacy_dma_takedown(dev);
-
-	dev->dev_mapping = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
 		return -ENODEV;
 
 	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -473,7 +403,7 @@ long drm_ioctl(struct file *filp,
 
       err_i1:
 	if (!ioctl)
-		DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
 			  task_pid_nr(current),
 			  (long)old_encode_dev(file_priv->minor->device),
 			  file_priv->authenticated, cmd, nr);

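The new DRM_IOCTL_SET_CLIENT_CAP entry above wires up the "client capability ioctl" from the highlights. A userspace sketch of opting in to stereo 3D modes before fetching connector modes (assumes libdrm's drmIoctl() and the uapi definitions added by this series):

	#include <xf86drm.h>

	static int enable_stereo_3d(int fd)
	{
		struct drm_set_client_cap cap = {
			.capability = DRM_CLIENT_CAP_STEREO_3D,
			.value = 1,	/* only 0 and 1 are accepted */
		};

		return drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
	}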
+ 281 - 33
drivers/gpu/drm/drm_edid.c

@@ -458,6 +458,15 @@ static const struct drm_display_mode drm_dmt_modes[] = {
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 };
 
+/*
+ * These more or less come from the DMT spec.  The 720x400 modes are
+ * inferred from historical 80x25 practice.  The 640x480@67 and 832x624@75
+ * modes are old-school Mac modes.  The EDID spec says the 1152x864@75 mode
+ * should be 1152x870, again for the Mac, but instead we use the x864 DMT
+ * mode.
+ *
+ * The DMT modes have been fact-checked; the rest are mild guesses.
+ */
 static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
@@ -560,7 +569,7 @@ static const struct minimode est3_modes[] = {
 	{ 1600, 1200, 75, 0 },
 	{ 1600, 1200, 85, 0 },
 	{ 1792, 1344, 60, 0 },
-	{ 1792, 1344, 85, 0 },
+	{ 1792, 1344, 75, 0 },
 	{ 1856, 1392, 60, 0 },
 	{ 1856, 1392, 75, 0 },
 	{ 1920, 1200, 60, 1 },
@@ -1264,6 +1273,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
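A usage sketch for the new helper: a driver that wants to keep a private copy of a probed EDID can deep-copy it, extension blocks included, instead of open-coding the kmemdup(). The caching scheme and struct foo_connector are hypothetical:

	static int foo_cache_edid(struct foo_connector *foo,
				  const struct edid *edid)
	{
		/* Replace any previously cached copy. */
		kfree(foo->cached_edid);
		foo->cached_edid = drm_edid_duplicate(edid);

		return foo->cached_edid ? 0 : -ENOMEM;
	}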
 /*** EDID parsing ***/
 
 /**
@@ -1308,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
 }
 
 #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
 
 /**
  * edid_fixup_preferred - set preferred modes based on quirk list
@@ -1323,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 {
 	struct drm_display_mode *t, *cur_mode, *preferred_mode;
 	int target_refresh = 0;
+	int cur_vrefresh, preferred_vrefresh;
 
 	if (list_empty(&connector->probed_modes))
 		return;
@@ -1345,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
 			preferred_mode = cur_mode;
 
+		cur_vrefresh = cur_mode->vrefresh ?
+			cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+		preferred_vrefresh = preferred_mode->vrefresh ?
+			preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
 		/* At a given size, try to get closest to target refresh */
 		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
-		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
-		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+		    MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+		    MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
 			preferred_mode = cur_mode;
 		}
 	}
@@ -2068,7 +2094,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 	u8 *est = ((u8 *)timing) + 5;
 
 	for (i = 0; i < 6; i++) {
-		for (j = 7; j > 0; j--) {
+		for (j = 7; j >= 0; j--) {
 			m = (i * 8) + (7 - j);
 			if (m >= ARRAY_SIZE(est3_modes))
 				break;
@@ -2404,7 +2430,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2453,7 +2479,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2507,6 +2533,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
 		if (!newmode)
 			continue;
 
+		/* Carry over the stereo flags */
+		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
 		/*
 		 * The current mode could be either variant. Make
 		 * sure to pick the "other" clock for the new mode.
@@ -2553,20 +2582,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
 	return modes;
 }
 
+struct stereo_mandatory_mode {
+	int width, height, vrefresh;
+	unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1920, 1080, 50,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1920, 1080, 60,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1280, 720,  50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720,  50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1280, 720,  60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720,  60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+		       const struct stereo_mandatory_mode *stereo_mode)
+{
+	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	return mode->hdisplay == stereo_mode->width &&
+	       mode->vdisplay == stereo_mode->height &&
+	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	const struct drm_display_mode *mode;
+	struct list_head stereo_modes;
+	int modes = 0, i;
+
+	INIT_LIST_HEAD(&stereo_modes);
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+			const struct stereo_mandatory_mode *mandatory;
+			struct drm_display_mode *new_mode;
+
+			if (!stereo_match_mandatory(mode,
+						    &stereo_mandatory_modes[i]))
+				continue;
+
+			mandatory = &stereo_mandatory_modes[i];
+			new_mode = drm_mode_duplicate(dev, mode);
+			if (!new_mode)
+				continue;
+
+			new_mode->flags |= mandatory->flags;
+			list_add_tail(&new_mode->head, &stereo_modes);
+			modes++;
+		}
+	}
+
+	list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+	return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+
+	vic--; /* VICs start at 1 */
+	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+		return 0;
+	}
+
+	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+	if (!newmode)
+		return 0;
+
+	drm_mode_probed_add(connector, newmode);
+
+	return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+			       const u8 *video_db, u8 video_len, u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	int modes = 0;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index > video_len)
+		return 0;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return 0;
+
+	if (structure & (1 << 0)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 6)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 8)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	return modes;
+}
+
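A worked example of the 3D_Structure_ALL decoding that add_3d_struct_modes() performs above (the value is invented for illustration): a word of 0x0141 sets bits 0, 6 and 8, so every CEA mode admitted by 3D_MASK gains frame-packing, top-and-bottom and side-by-side-half variants:

	u16 structure_all = 0x0141;	/* example VSDB value: bits 0, 6, 8 */

	bool frame_packing  = structure_all & (1 << 0);	/* true */
	bool top_and_bottom = structure_all & (1 << 6);	/* true */
	bool sbs_half       = structure_all & (1 << 8);	/* true */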
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the CEA vendor specific block
  * @len: length of the CEA block payload, ie. one can access up to db[len]
  *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
  */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+		   const u8 *video_db, u8 video_len)
 {
-	struct drm_device *dev = connector->dev;
-	int modes = 0, offset = 0, i;
-	u8 vic_len;
+	int modes = 0, offset = 0, i, multi_present = 0;
+	u8 vic_len, hdmi_3d_len = 0;
+	u16 mask;
+	u16 structure_all;
 
 	if (len < 8)
 		goto out;
@@ -2585,30 +2745,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 
 	/* the declared length is not long enough for the 2 first bytes
 	 * of additional video format capabilities */
-	offset += 2;
-	if (len < (8 + offset))
+	if (len < (8 + offset + 2))
 		goto out;
 
+	/* 3D_Present */
+	offset++;
+	if (db[8 + offset] & (1 << 7)) {
+		modes += add_hdmi_mandatory_stereo_modes(connector);
+
+		/* 3D_Multi_present */
+		multi_present = (db[8 + offset] & 0x60) >> 5;
+	}
+
+	offset++;
 	vic_len = db[8 + offset] >> 5;
+	hdmi_3d_len = db[8 + offset] & 0x1f;
 
 	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
-		struct drm_display_mode *newmode;
 		u8 vic;
 
 		vic = db[9 + offset + i];
+		modes += add_hdmi_mode(connector, vic);
+	}
+	offset += 1 + vic_len;
 
-		vic--; /* VICs start at 1 */
-		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
-			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
-			continue;
-		}
+	if (!(multi_present == 1 || multi_present == 2))
+		goto out;
 
-		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
-		if (!newmode)
-			continue;
+	if ((multi_present == 1 && len < (9 + offset)) ||
+	    (multi_present == 2 && len < (11 + offset)))
+		goto out;
 
-		drm_mode_probed_add(connector, newmode);
-		modes++;
+	if ((multi_present == 1 && hdmi_3d_len < 2) ||
+	    (multi_present == 2 && hdmi_3d_len < 4))
+		goto out;
+
+	/* 3D_Structure_ALL */
+	structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+	/* check if 3D_MASK is present */
+	if (multi_present == 2)
+		mask = (db[10 + offset] << 8) | db[11 + offset];
+	else
+		mask = 0xffff;
+
+	for (i = 0; i < 16; i++) {
+		if (mask & (1 << i))
+			modes += add_3d_struct_modes(connector,
+						     structure_all,
+						     video_db,
+						     video_len, i);
 	}
 
 out:
@@ -2668,8 +2854,8 @@ static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	const u8 *cea = drm_find_cea_extension(edid);
-	const u8 *db;
-	u8 dbl;
+	const u8 *db, *hdmi = NULL, *video = NULL;
+	u8 dbl, hdmi_len, video_len = 0;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2868,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 			db = &cea[i];
 			dbl = cea_db_payload_len(db);
 
-			if (cea_db_tag(db) == VIDEO_BLOCK)
-				modes += do_cea_modes(connector, db + 1, dbl);
-			else if (cea_db_is_hdmi_vsdb(db))
-				modes += do_hdmi_vsdb_modes(connector, db, dbl);
+			if (cea_db_tag(db) == VIDEO_BLOCK) {
+				video = db + 1;
+				video_len = dbl;
+				modes += do_cea_modes(connector, video, dbl);
+			}
+			else if (cea_db_is_hdmi_vsdb(db)) {
+				hdmi = db;
+				hdmi_len = dbl;
+			}
 		}
 	}
 
+	/*
+	 * We parse the HDMI VSDB after having added the cea modes as we will
+	 * be patching their flags when the sink supports stereo 3D.
+	 */
+	if (hdmi)
+		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+					    video_len);
+
 	return modes;
 }
 
@@ -3288,6 +3487,19 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_add_modes_noedid);
 
+void drm_set_preferred_mode(struct drm_connector *connector,
+			   int hpref, int vpref)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (drm_mode_width(mode)  == hpref &&
+		    drm_mode_height(mode) == vpref)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
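The new drm_set_preferred_mode() pairs naturally with drm_add_modes_noedid(): a driver that knows a panel's native size can populate fallback modes and then mark the native one preferred. A sketch of a connector's get_modes callback using the pair (the foo_* name and the 1920x1080 size are illustrative):

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		int count;

		/* Populate standard fallback modes, then flag the native one. */
		count = drm_add_modes_noedid(connector, 1920, 1080);
		drm_set_preferred_mode(connector, 1920, 1080);

		return count;
	}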
 /**
  * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
  *                                              data from a DRM display mode
@@ -3321,6 +3533,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+	u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	switch (layout) {
+	case DRM_MODE_FLAG_3D_FRAME_PACKING:
+		return HDMI_3D_STRUCTURE_FRAME_PACKING;
+	case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+	case DRM_MODE_FLAG_3D_L_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH;
+	case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+		return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+	default:
+		return HDMI_3D_STRUCTURE_INVALID;
+	}
+}
+
 /**
  * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
  * data from a DRM display mode
@@ -3338,20 +3577,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
 					    const struct drm_display_mode *mode)
 {
 	int err;
+	u32 s3d_flags;
 	u8 vic;
 
 	if (!frame || !mode)
 		return -EINVAL;
 
 	vic = drm_match_hdmi_mode(mode);
-	if (!vic)
+	s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	if (!vic && !s3d_flags)
+		return -EINVAL;
+
+	if (vic && s3d_flags)
 		return -EINVAL;
 
 	err = hdmi_vendor_infoframe_init(frame);
 	if (err < 0)
 		return err;
 
-	frame->vic = vic;
+	if (vic)
+		frame->vic = vic;
+	else
+		frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
 	return 0;
 }

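After this change a driver does not need to pre-classify the mode before building the HDMI vendor infoframe: the helper accepts either a 4k VIC mode or a stereo 3D mode and rejects everything else. A sketch of the resulting calling pattern (foo_write_infoframe() is hypothetical):

	struct hdmi_vendor_infoframe frame;

	/* Returns -EINVAL for modes that need no vendor infoframe at all. */
	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) == 0)
		foo_write_infoframe(encoder, &frame);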
+ 54 - 54
drivers/gpu/drm/drm_edid_load.c

@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
 	"from built-in data or /lib/firmware instead. ");
 
 #define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
 	"edid/1024x768.bin",
 	"edid/1280x1024.bin",
 	"edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
 	"edid/1920x1080.bin",
 };
 
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
 	{
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
 	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
 	},
 };
 
+static int edid_size(const u8 *edid, int data_size)
+{
+	if (data_size < EDID_LENGTH)
+		return 0;
+
+	return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
 static u8 *edid_load(struct drm_connector *connector, const char *name,
 			const char *connector_name)
 {
-	const struct firmware *fw;
-	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid, *new_edid;
-	int fwsize, expected;
-	int builtin = 0, err = 0;
+	const struct firmware *fw = NULL;
+	const u8 *fwdata;
+	u8 *edid;
+	int fwsize, builtin;
 	int i, valid_extensions = 0;
 	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
-	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		DRM_ERROR("Failed to register EDID firmware platform device "
-		    "for connector \"%s\"\n", connector_name);
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = request_firmware(&fw, name, &pdev->dev);
-	platform_device_unregister(pdev);
-
-	if (err) {
-		i = 0;
-		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
-			i++;
-		if (i < GENERIC_EDIDS) {
-			err = 0;
-			builtin = 1;
+	builtin = 0;
+	for (i = 0; i < GENERIC_EDIDS; i++) {
+		if (strcmp(name, generic_edid_name[i]) == 0) {
 			fwdata = generic_edid[i];
 			fwsize = sizeof(generic_edid[i]);
+			builtin = 1;
+			break;
 		}
 	}
+	if (!builtin) {
+		struct platform_device *pdev;
+		int err;
 
-	if (err) {
-		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
-		    name, err);
-		goto out;
-	}
+		pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			DRM_ERROR("Failed to register EDID firmware platform device "
+				  "for connector \"%s\"\n", connector_name);
+			return ERR_CAST(pdev);
+		}
+
+		err = request_firmware(&fw, name, &pdev->dev);
+		platform_device_unregister(pdev);
+		if (err) {
+			DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+				  name, err);
+			return ERR_PTR(err);
+		}
 
-	if (fwdata == NULL) {
-		fwdata = (u8 *) fw->data;
+		fwdata = fw->data;
 		fwsize = fw->size;
 	}
 
-	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
-	if (expected != fwsize) {
+	if (edid_size(fwdata, fwsize) != fwsize) {
 		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
-		    "(expected %d, got %d)\n", name, expected, (int) fwsize);
-		err = -EINVAL;
-		goto relfw_out;
+			  "(expected %d, got %d\n", name,
+			  edid_size(fwdata, fwsize), (int)fwsize);
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
 	if (edid == NULL) {
-		err = -ENOMEM;
-		goto relfw_out;
+		edid = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
 	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 		    name);
 		kfree(edid);
-		err = -EINVAL;
-		goto relfw_out;
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 	}
 
 	if (valid_extensions != edid[0x7e]) {
+		u8 *new_edid;
+
 		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
 		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
 		    "\"%s\" for connector \"%s\"\n", valid_extensions,
 		    edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
+
 		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
-		    GFP_KERNEL);
-		if (new_edid == NULL) {
-			err = -ENOMEM;
-			kfree(edid);
-			goto relfw_out;
-		}
-		edid = new_edid;
+				    GFP_KERNEL);
+		if (new_edid)
+			edid = new_edid;
 	}
 
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
 	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
 	    name, connector_name);
 
-relfw_out:
-	release_firmware(fw);
-
 out:
-	if (err)
-		return ERR_PTR(err);
-
+	if (fw)
+		release_firmware(fw);
 	return edid;
 }
 

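The reworked edid_load() reports failure through ERR_PTR() values instead of a separate err variable, so a caller collapses to a single IS_ERR() check. A sketch of the resulting calling convention (the surrounding code is illustrative of a caller such as the firmware-load path):

	u8 *edid = edid_load(connector, name, connector_name);

	if (IS_ERR(edid))
		return NULL;	/* or propagate PTR_ERR(edid) */

	/* ... hand the blob to the EDID parser, then kfree(edid) ... */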
+ 2 - 15
drivers/gpu/drm/drm_fb_helper.c

@@ -39,10 +39,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
 static LIST_HEAD(kernel_fb_helper_list);
 
 /**
@@ -844,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_mode_set *modeset;
-	struct drm_crtc *crtc;
 	int ret = 0;
 	int i;
 
@@ -855,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 	}
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
-		crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
 		modeset = &fb_helper->crtc_info[i].mode_set;
 
 		modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 	struct drm_connector *connector;
 	struct drm_connector_helper_funcs *connector_funcs;
 	struct drm_encoder *encoder;
-	struct drm_fb_helper_crtc *best_crtc;
 	int my_score, best_score, score;
 	struct drm_fb_helper_crtc **crtcs, *crtc;
 	struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 	connector = fb_helper_conn->connector;
 
 	best_crtcs[n] = NULL;
-	best_crtc = NULL;
 	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
 	if (modes[n] == NULL)
 		return best_score;
@@ -1413,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
 						  width, height);
 		if (score > best_score) {
-			best_crtc = crtc;
 			best_score = score;
 			memcpy(best_crtcs, crtcs,
 			       dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
-	int count = 0;
-	u32 max_width, max_height, bpp_sel;
+	u32 max_width, max_height;
 
 	if (!fb_helper->fb)
 		return 0;
@@ -1596,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 
 	max_width = fb_helper->fb->width;
 	max_height = fb_helper->fb->height;
-	bpp_sel = fb_helper->fb->bits_per_pixel;
 
-	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
-						    max_height);
+	drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
 	mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
 	drm_modeset_lock_all(dev);

+ 71 - 6
drivers/gpu/drm/drm_fops.c

@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
 	retcode = drm_open_helper(inode, filp, dev);
 	if (retcode)
 		goto err_undo;
-	atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
 	if (need_setup) {
 		retcode = drm_setup(dev);
 		if (retcode)
@@ -235,7 +234,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
-	priv->authenticated = capable(CAP_SYS_ADMIN);
+	priv->always_authenticated = capable(CAP_SYS_ADMIN);
+	priv->authenticated = priv->always_authenticated;
 	priv->lock_count = 0;
 
 	INIT_LIST_HEAD(&priv->lhead);
@@ -374,12 +374,79 @@ static void drm_events_release(struct drm_file *file_priv)
 		}
 
 	/* Remove unconsumed events */
-	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+		list_del(&e->link);
 		e->destroy(e);
+	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in it's lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->last_context = 0;
+	dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+	struct drm_vma_entry *vma, *vma_temp;
+
+	DRM_DEBUG("\n");
+
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
+
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
+	}
+
+	drm_legacy_dma_takedown(dev);
+
+	dev->dev_mapping = NULL;
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_legacy_dev_reinit(dev);
+
+	DRM_DEBUG("lastclose completed\n");
+	return 0;
+}
+
 /**
  * Release file.
  *
@@ -449,7 +516,6 @@ int drm_release(struct inode *inode, struct file *filp)
 
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
@@ -463,7 +529,7 @@ int drm_release(struct inode *inode, struct file *filp)
 		list_for_each_entry(temp, &dev->filelist, lhead) {
 			if ((temp->master == file_priv->master) &&
 			    (temp != file_priv))
-				temp->authenticated = 0;
+				temp->authenticated = temp->always_authenticated;
 		}
 
 		/**
@@ -511,7 +577,6 @@ int drm_release(struct inode *inode, struct file *filp)
 	 * End inline drm_release
 	 */
 
-	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
 	if (!--dev->open_count) {
 		if (atomic_read(&dev->ioctl_count)) {
 			DRM_ERROR("Device busy: %d\n",

+ 0 - 29
drivers/gpu/drm/drm_gem.c

@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
-	struct drm_gem_object *obj;
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		goto free;
-
-	if (drm_gem_object_init(dev, obj, size) != 0)
-		goto free;
-
-	if (dev->driver->gem_init_object != NULL &&
-	    dev->driver->gem_init_object(obj) != 0) {
-		goto fput;
-	}
-	return obj;
-fput:
-	/* Object_init mangles the global counters - readjust them. */
-	fput(obj->filp);
-free:
-	kfree(obj);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {

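With drm_gem_object_alloc() removed, drivers embed the GEM object in their own buffer structure and initialize it directly. A minimal sketch of the replacement pattern (struct foo_bo and foo_bo_create() are hypothetical):

	struct foo_bo {
		struct drm_gem_object base;
		/* driver-private fields ... */
	};

	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
	{
		struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

		if (!bo)
			return ERR_PTR(-ENOMEM);

		/* Sets up the shmfs backing store that the old helper did. */
		if (drm_gem_object_init(dev, &bo->base, size)) {
			kfree(bo);
			return ERR_PTR(-ENOMEM);
		}

		return bo;
	}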
+ 0 - 2
drivers/gpu/drm/drm_global.c

@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
 {
 	int ret;
 	struct drm_global_item *item = &glob[ref->global_type];
-	void *object;
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
 	}
 	++item->refcount;
 	ref->object = item->object;
-	object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
 out_err:

+ 3 - 3
drivers/gpu/drm/drm_info.c

@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
 	mutex_lock(&dev->struct_mutex);
 	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
 		seq_printf(m, "CRTC %d enable:     %d\n",
-			   crtc, atomic_read(&dev->vblank_refcount[crtc]));
+			   crtc, atomic_read(&dev->vblank[crtc].refcount));
 		seq_printf(m, "CRTC %d counter:    %d\n",
 			   crtc, drm_vblank_count(dev, crtc));
 		seq_printf(m, "CRTC %d last wait:  %d\n",
-			   crtc, dev->last_vblank_wait[crtc]);
+			   crtc, dev->vblank[crtc].last_wait);
 		seq_printf(m, "CRTC %d in modeset: %d\n",
-			   crtc, dev->vblank_inmodeset[crtc]);
+			   crtc, dev->vblank[crtc].inmodeset);
 	}
 	mutex_unlock(&dev->struct_mutex);
 	return 0;

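Per-CRTC vblank state now lives in a single array of structs instead of parallel per-field arrays (assuming the struct drm_vblank_crtc layout introduced alongside these hunks). A sketch of the access pattern this buys, with field names as used above:

	struct drm_vblank_crtc *vbl = &dev->vblank[crtc];

	if (vbl->enabled)
		seq_printf(m, "CRTC %d refcount: %d\n",
			   crtc, atomic_read(&vbl->refcount));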
+ 21 - 0
drivers/gpu/drm/drm_ioctl.c

@@ -302,6 +302,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	return 0;
 }
 
+/**
+ * Set device/driver capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_client_cap *req = data;
+
+	switch (req->capability) {
+	case DRM_CLIENT_CAP_STEREO_3D:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->stereo_allowed = req->value;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * Setversion ioctl.
  *

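The stereo_allowed flag set here is what gates stereo modes elsewhere in the series: mode listing only exposes 3D-flagged modes to clients that opted in. A sketch of that filter, assumed to mirror the getconnector path (the wrapper name is illustrative):

	static bool foo_mode_visible(const struct drm_file *file_priv,
				     const struct drm_display_mode *mode)
	{
		/* Hide stereo modes from clients that never opted in. */
		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
		    !file_priv->stereo_allowed)
			return false;

		return true;
	}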
+ 66 - 111
drivers/gpu/drm/drm_irq.c

@@ -43,9 +43,8 @@
 #include <linux/export.h>
 #include <linux/export.h>
 
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) ( \
-	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
-	((count) % DRM_VBLANKTIME_RBSIZE)])
+#define vblanktimestamp(dev, crtc, count) \
+	((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 
 /* Retry timestamp calculation up to 3 times to satisfy
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
  * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
  */
  */
 static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
 static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
 {
 {
-	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
-		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+	memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
 }
 }
 
 
 /*
 /*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 
 	dev->driver->disable_vblank(dev, crtc);
 	dev->driver->disable_vblank(dev, crtc);
-	dev->vblank_enabled[crtc] = 0;
+	dev->vblank[crtc].enabled = false;
 
 
 	/* No further vblank irq's will be processed after
 	/* No further vblank irq's will be processed after
 	 * this point. Get current hardware vblank count and
 	 * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * delayed gpu counter increment.
 	 * delayed gpu counter increment.
 	 */
 	 */
 	do {
 	do {
-		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
 		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
 		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+	} while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
 
 
 	if (!count)
 	if (!count)
 		vblrc = 0;
 		vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	/* Compute time difference to stored timestamp of last vblank
 	/* Compute time difference to stored timestamp of last vblank
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
 	 */
 	 */
-	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	vblcount = atomic_read(&dev->vblank[crtc].count);
 	diff_ns = timeval_to_ns(&tvblank) -
 	diff_ns = timeval_to_ns(&tvblank) -
 		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
 		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
 
 
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * hope for the best.
 	 * hope for the best.
 	 */
 	 */
 	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
-		atomic_inc(&dev->_vblank_count[crtc]);
+		atomic_inc(&dev->vblank[crtc].count);
 		smp_mb__after_atomic_inc();
 		smp_mb__after_atomic_inc();
 	}
 	}
 
 
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
 
 
 	for (i = 0; i < dev->num_crtcs; i++) {
 	for (i = 0; i < dev->num_crtcs; i++) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
-		    dev->vblank_enabled[i]) {
+		if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+		    dev->vblank[i].enabled) {
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
 			vblank_disable_and_save(dev, i);
 			vblank_disable_and_save(dev, i);
 		}
 		}
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
 
 	vblank_disable_fn((unsigned long)dev);
 	vblank_disable_fn((unsigned long)dev);
 
 
-	kfree(dev->vbl_queue);
-	kfree(dev->_vblank_count);
-	kfree(dev->vblank_refcount);
-	kfree(dev->vblank_enabled);
-	kfree(dev->last_vblank);
-	kfree(dev->last_vblank_wait);
-	kfree(dev->vblank_inmodeset);
-	kfree(dev->_vblank_time);
+	kfree(dev->vblank);
 
 
 	dev->num_crtcs = 0;
 	dev->num_crtcs = 0;
 }
 }
@@ -221,42 +212,14 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 
 
 	dev->num_crtcs = num_crtcs;
 	dev->num_crtcs = num_crtcs;
 
 
-	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
-				 GFP_KERNEL);
-	if (!dev->vbl_queue)
+	dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+	if (!dev->vblank)
 		goto err;
 		goto err;
 
 
-	dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
-	if (!dev->_vblank_count)
-		goto err;
-
-	dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
-				       GFP_KERNEL);
-	if (!dev->vblank_refcount)
-		goto err;
-
-	dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-	if (!dev->vblank_enabled)
-		goto err;
+	for (i = 0; i < num_crtcs; i++)
+		init_waitqueue_head(&dev->vblank[i].queue);
 
 
-	dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-	if (!dev->last_vblank)
-		goto err;
-
-	dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-	if (!dev->last_vblank_wait)
-		goto err;
-
-	dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-	if (!dev->vblank_inmodeset)
-		goto err;
-
-	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
-				    sizeof(struct timeval), GFP_KERNEL);
-	if (!dev->_vblank_time)
-		goto err;
-
-	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+	DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
 
 	/* Driver specific high-precision vblank timestamping supported? */
 	/* Driver specific high-precision vblank timestamping supported? */
 	if (dev->driver->get_vblank_timestamp)
 	if (dev->driver->get_vblank_timestamp)
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	else
 	else
 		DRM_INFO("No driver support for vblank timestamp query.\n");
 		DRM_INFO("No driver support for vblank timestamp query.\n");
 
 
-	/* Zero per-crtc vblank stuff */
-	for (i = 0; i < num_crtcs; i++) {
-		init_waitqueue_head(&dev->vbl_queue[i]);
-		atomic_set(&dev->_vblank_count[i], 0);
-		atomic_set(&dev->vblank_refcount[i], 0);
-	}
+	dev->vblank_disable_allowed = false;
 
 
-	dev->vblank_disable_allowed = 0;
 	return 0;
 	return 0;
 
 
 err:
 err:
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
 
 
 	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
 	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
 
 
 	if (ret < 0) {
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 		return ret;
 	}
 	}
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
 
 
 	if (ret < 0) {
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->struct_mutex);
 		if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		if (!drm_core_check_feature(dev, DRIVER_MODESET))
 			vga_client_register(dev->pdev, NULL, NULL, NULL);
 			vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
 int drm_irq_uninstall(struct drm_device *dev)
 int drm_irq_uninstall(struct drm_device *dev)
 {
 {
 	unsigned long irqflags;
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 		return -EINVAL;
 
 
 	mutex_lock(&dev->struct_mutex);
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
 
 
 	/*
 	/*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
 	if (dev->num_crtcs) {
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -628,24 +586,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 	 * code gets preempted or delayed for some reason.
 	 * code gets preempted or delayed for some reason.
 	 */
 	 */
 	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
 	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
-		/* Disable preemption to make it very likely to
-		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		/*
+		 * Get vertical and horizontal scanout position vpos, hpos,
+		 * and bounding timestamps stime, etime, pre/post query.
 		 */
 		 */
-		preempt_disable();
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+							       &hpos, &stime, &etime);
 
 
-		/* Get system timestamp before query. */
-		stime = ktime_get();
-
-		/* Get vertical and horizontal scanout pos. vpos, hpos. */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
-
-		/* Get system timestamp after query. */
-		etime = ktime_get();
+		/*
+		 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
+		 * CLOCK_REALTIME is requested.
+		 */
 		if (!drm_timestamp_monotonic)
 		if (!drm_timestamp_monotonic)
 			mono_time_offset = ktime_get_monotonic_offset();
 			mono_time_offset = ktime_get_monotonic_offset();
 
 
-		preempt_enable();
-
 		/* Return as no-op if scanout query unsupported or failed. */
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
 			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
 			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -653,6 +607,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 			return -EIO;
 			return -EIO;
 		}
 		}
 
 
+		/* Compute uncertainty in timestamp of scanout position query. */
 		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
 
 		/* Accept result with <  max_error nsecs timing uncertainty. */
 		/* Accept result with <  max_error nsecs timing uncertainty. */
@@ -795,7 +750,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
  */
  */
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
 {
-	return atomic_read(&dev->_vblank_count[crtc]);
+	return atomic_read(&dev->vblank[crtc].count);
 }
 }
 EXPORT_SYMBOL(drm_vblank_count);
 EXPORT_SYMBOL(drm_vblank_count);
 
 
@@ -824,10 +779,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 	 * a seqlock.
 	 * a seqlock.
 	 */
 	 */
 	do {
 	do {
-		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		cur_vblank = atomic_read(&dev->vblank[crtc].count);
 		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
 		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
 		smp_rmb();
 		smp_rmb();
-	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+	} while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
 
 
 	return cur_vblank;
 	return cur_vblank;
 }
 }
@@ -914,12 +869,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
 	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
 
 
 	/* Deal with counter wrap */
 	/* Deal with counter wrap */
-	diff = cur_vblank - dev->last_vblank[crtc];
-	if (cur_vblank < dev->last_vblank[crtc]) {
+	diff = cur_vblank - dev->vblank[crtc].last;
+	if (cur_vblank < dev->vblank[crtc].last) {
 		diff += dev->max_vblank_count;
 		diff += dev->max_vblank_count;
 
 
 		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
 		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+			  crtc, dev->vblank[crtc].last, cur_vblank, diff);
 	}
 	}
 
 
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +885,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	 * reinitialize delayed at next vblank interrupt in that case.
 	 */
 	if (rc) {
-		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		tslot = atomic_read(&dev->vblank[crtc].count) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
 	}

 	smp_mb__before_atomic_inc();
-	atomic_add(diff, &dev->_vblank_count[crtc]);
+	atomic_add(diff, &dev->vblank[crtc].count);
 	smp_mb__after_atomic_inc();
 }

@@ -957,9 +912,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)

 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+	if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
-		if (!dev->vblank_enabled[crtc]) {
+		if (!dev->vblank[crtc].enabled) {
 			/* Enable vblank irqs under vblank_time_lock protection.
 			 * All vblank count & timestamp updates are held off
 			 * until we are done reinitializing master counter and
@@ -970,16 +925,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
 			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
 				  crtc, ret);
 				  crtc, ret);
 			if (ret)
 			if (ret)
-				atomic_dec(&dev->vblank_refcount[crtc]);
+				atomic_dec(&dev->vblank[crtc].refcount);
 			else {
-				dev->vblank_enabled[crtc] = 1;
+				dev->vblank[crtc].enabled = true;
 				drm_update_vblank_count(dev, crtc);
 			}
 		}
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
 	} else {
-		if (!dev->vblank_enabled[crtc]) {
-			atomic_dec(&dev->vblank_refcount[crtc]);
+		if (!dev->vblank[crtc].enabled) {
+			atomic_dec(&dev->vblank[crtc].refcount);
 			ret = -EINVAL;
 		}
 	}
@@ -999,10 +954,10 @@ EXPORT_SYMBOL(drm_vblank_get);
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);

 	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
 	    (drm_vblank_offdelay > 0))
 		mod_timer(&dev->vblank_disable_timer,
 			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +980,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)

 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	vblank_disable_and_save(dev, crtc);
-	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	DRM_WAKEUP(&dev->vblank[crtc].queue);

 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1022,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 	 * to avoid corrupting the count if multiple, mismatch calls occur),
 	 * so that interrupts remain enabled in the interim.
 	 */
-	if (!dev->vblank_inmodeset[crtc]) {
-		dev->vblank_inmodeset[crtc] = 0x1;
+	if (!dev->vblank[crtc].inmodeset) {
+		dev->vblank[crtc].inmodeset = 0x1;
 		if (drm_vblank_get(dev, crtc) == 0)
-			dev->vblank_inmodeset[crtc] |= 0x2;
+			dev->vblank[crtc].inmodeset |= 0x2;
 	}
 }
 EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1038,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
 	if (!dev->num_crtcs)
 		return;

-	if (dev->vblank_inmodeset[crtc]) {
+	if (dev->vblank[crtc].inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		dev->vblank_disable_allowed = 1;
+		dev->vblank_disable_allowed = true;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

-		if (dev->vblank_inmodeset[crtc] & 0x2)
+		if (dev->vblank[crtc].inmodeset & 0x2)
 			drm_vblank_put(dev, crtc);

-		dev->vblank_inmodeset[crtc] = 0;
+		dev->vblank[crtc].inmodeset = 0;
 	}
 }
 EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1243,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,

 	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
 		  vblwait->request.sequence, crtc);
-	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
-	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+	dev->vblank[crtc].last_wait = vblwait->request.sequence;
+	DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
 		    (((drm_vblank_count(dev, crtc) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !dev->irq_enabled));
@@ -1367,7 +1322,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);

 	/* Vblank irq handling disabled. Nothing to do. */
-	if (!dev->vblank_enabled[crtc]) {
+	if (!dev->vblank[crtc].enabled) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 		return false;
 	}
@@ -1377,7 +1332,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 */

 	/* Get current timestamp and count. */
-	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	vblcount = atomic_read(&dev->vblank[crtc].count);
 	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);

 	/* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1356,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 		 * the timestamp computed above.
 		 */
 		smp_mb__before_atomic_inc();
-		atomic_inc(&dev->_vblank_count[crtc]);
+		atomic_inc(&dev->vblank[crtc].count);
 		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
 	}

-	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	DRM_WAKEUP(&dev->vblank[crtc].queue);
 	drm_handle_vblank_events(dev, crtc);

 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);

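The drm_irq.c hunks above all follow one refactor: the old parallel per-CRTC arrays on struct drm_device are folded into a single array of per-CRTC state. A hypothetical consolidated layout consistent with the accesses in this diff (field types here are assumptions, not taken from the patch):

struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* was dev->vbl_queue[crtc] */
	atomic_t count;			/* was dev->_vblank_count[crtc] */
	atomic_t refcount;		/* was dev->vblank_refcount[crtc] */
	u32 last;			/* was dev->last_vblank[crtc] */
	u32 last_wait;			/* was dev->last_vblank_wait[crtc] */
	bool enabled;			/* was dev->vblank_enabled[crtc] */
	u32 inmodeset;			/* was dev->vblank_inmodeset[crtc] */
};

Note how drm_vblank_count_and_time() reads this state without taking a lock: it samples count, fetches the matching timestamp, issues smp_rmb(), and retries if count changed in between -- effectively a seqlock reader, paired with the smp_mb__before/after_atomic_inc() barriers on the writer side in drm_handle_vblank().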
+ 0 - 3
drivers/gpu/drm/drm_lock.c

@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		if (drm_lock_take(&master->lock, lock->context)) {
 			master->lock.file_priv = file_priv;
 			master->lock.lock_time = jiffies;
-			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
 			break;	/* Got lock */
 		}

@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;
 	}

-	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
 	if (drm_lock_free(&master->lock, lock->context)) {
 		/* FIXME: Should really bail out here. */
 	}

+ 35 - 8
drivers/gpu/drm/drm_modes.c

@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
 /**
  * drm_mode_set_crtcinfo - set CRTC modesetting parameters
  * @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
  *
  * LOCKING:
  * None.
  *
  * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ *   interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ *   buffers containing two eyes (only adjust the timings when needed, eg. for
+ *   "frame packing" or "side by side full").
  */
 void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 {
 	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
 		return;

+	p->crtc_clock = p->clock;
 	p->crtc_hdisplay = p->hdisplay;
 	p->crtc_hsync_start = p->hsync_start;
 	p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 		p->crtc_vtotal *= p->vscan;
 	}

+	if (adjust_flags & CRTC_STEREO_DOUBLE) {
+		unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+		switch (layout) {
+		case DRM_MODE_FLAG_3D_FRAME_PACKING:
+			p->crtc_clock *= 2;
+			p->crtc_vdisplay += p->crtc_vtotal;
+			p->crtc_vsync_start += p->crtc_vtotal;
+			p->crtc_vsync_end += p->crtc_vtotal;
+			p->crtc_vtotal += p->crtc_vtotal;
+			break;
+		}
+	}
+
 	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
 	} else if (mode1->clock != mode2->clock)
 		return false;

-	return drm_mode_equal_no_clocks(mode1, mode2);
+	if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+	    (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+		return false;
+
+	return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
 }
 EXPORT_SYMBOL(drm_mode_equal);

 /**
- * drm_mode_equal_no_clocks - test modes for equality
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
  * @mode1: first mode
  * @mode2: second mode
  *
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
  * None.
  *
  * Check to see if @mode1 and @mode2 are equivalent, but
- * don't check the pixel clocks.
+ * don't check the pixel clocks nor the stereo layout.
  *
  * RETURNS:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+					const struct drm_display_mode *mode2)
 {
 	if (mode1->hdisplay == mode2->hdisplay &&
 	    mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
 	    mode1->vsync_end == mode2->vsync_end &&
 	    mode1->vtotal == mode2->vtotal &&
 	    mode1->vscan == mode2->vscan &&
-	    mode1->flags == mode2->flags)
+	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+	     (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
 		return true;

 	return false;
 }
-EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);

 /**
  * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1014,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
 				/* if equal delete the probed mode */
 				mode->status = pmode->status;
 				/* Merge type bits together */
-				mode->type |= pmode->type;
+				mode->type = pmode->type;
 				list_del(&pmode->head);
 				drm_mode_destroy(connector->dev, pmode);
 				break;

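For the CRTC_STEREO_DOUBLE path added above, frame packing stacks both eyes plus the original vertical blanking into one oversized frame, so every vertical timing grows by one vtotal and the pixel clock doubles. A standalone sketch (not from the patch) running the same arithmetic for an assumed CEA-861 1080p/60 mode:

#include <stdio.h>

int main(void)
{
	/* assumed 1080p60 timings, clock in kHz, the rest in lines */
	int clock = 148500, vdisplay = 1080, vsync_start = 1084,
	    vsync_end = 1089, vtotal = 1125;

	/* DRM_MODE_FLAG_3D_FRAME_PACKING adjustment, as in the hunk above */
	clock *= 2;		/* 297000 kHz */
	vdisplay += vtotal;	/* 2205: two 1080-line eyes + 45-line gap */
	vsync_start += vtotal;
	vsync_end += vtotal;
	vtotal += vtotal;	/* 2250 lines total */

	printf("clock=%d vdisplay=%d vsync=%d-%d vtotal=%d\n",
	       clock, vdisplay, vsync_start, vsync_end, vtotal);
	return 0;
}

The vertical refresh rate is unchanged: 297000000 / (2200 * 2250) is still 60 Hz.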
+ 11 - 58
drivers/gpu/drm/drm_pci.c

@@ -80,7 +80,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 	/* Reserve */
 	for (addr = (unsigned long)dmah->vaddr, sz = size;
 	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-		SetPageReserved(virt_to_page(addr));
+		SetPageReserved(virt_to_page((void *)addr));
 	}

 	return dmah;
@@ -103,7 +103,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 		/* Unreserve */
 		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
 		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-			ClearPageReserved(virt_to_page(addr));
+			ClearPageReserved(virt_to_page((void *)addr));
 		}
 		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
 				  dmah->busaddr);
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,

 	DRM_DEBUG("\n");

-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &pdev->dev);
 	if (!dev)
 		return -ENOMEM;

 	ret = pci_enable_device(pdev);
 	if (ret)
-		goto err_g1;
+		goto err_free;

 	dev->pdev = pdev;
-	dev->dev = &pdev->dev;
-
-	dev->pci_device = pdev->device;
-	dev->pci_vendor = pdev->vendor;
-
 #ifdef __alpha__
 	dev->hose = pdev->sysdata;
 #endif

-	mutex_lock(&drm_global_mutex);
-
-	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g2;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		pci_set_drvdata(pdev, dev);
-		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-		if (ret)
-			goto err_g2;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g21;
-	}
-
-	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
-		goto err_g3;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, ent->driver_data);
-		if (ret)
-			goto err_g4;
-	}
-	/* setup the grouping for the legacy output */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-						&dev->primary->mode_group);
-		if (ret)
-			goto err_g4;
-	}
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_pci;

 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, pci_name(pdev), dev->primary->index);

-	mutex_unlock(&drm_global_mutex);
 	return 0;

-err_g4:
-	drm_put_minor(&dev->primary);
-err_g3:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g21:
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-err_g2:
+err_pci:
 	pci_disable_device(pdev);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);

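With drm_dev_alloc()/drm_dev_register() factored out, a bus glue layer shrinks to allocate, wire up bus specifics, register. A minimal sketch of the new shape for a hypothetical bus (the example_* names are illustrative, not from the patch):

static int example_probe(struct example_device *edev,
			 struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(driver, &edev->dev);
	if (!dev)
		return -ENOMEM;

	dev->dev_private = edev;	/* bus-specific wiring goes here */

	ret = drm_dev_register(dev, 0);
	if (ret)
		goto err_free;
	return 0;

err_free:
	drm_dev_free(dev);
	return ret;
}

The PCI, platform, and USB variants in this series differ only in the few lines between alloc and register.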
+ 5 - 54
drivers/gpu/drm/drm_platform.c

@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,

 	DRM_DEBUG("\n");

-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &platdev->dev);
 	if (!dev)
 		return -ENOMEM;

 	dev->platformdev = platdev;
-	dev->dev = &platdev->dev;
-	mutex_lock(&drm_global_mutex);
-
-	ret = drm_fill_in_dev(dev, NULL, driver);
-
-	if (ret) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g1;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-		if (ret)
-			goto err_g1;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g11;
-	}
-
-	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	ret = drm_dev_register(dev, 0);
 	if (ret)
-		goto err_g2;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, 0);
-		if (ret)
-			goto err_g3;
-	}
-
-	/* setup the grouping for the legacy output */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-				&dev->primary->mode_group);
-		if (ret)
-			goto err_g3;
-	}
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
-
-	mutex_unlock(&drm_global_mutex);
+		goto err_free;

 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,

 	return 0;

-err_g3:
-	drm_put_minor(&dev->primary);
-err_g2:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g11:
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 }


+ 1 - 2
drivers/gpu/drm/drm_prime.c

@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 	unsigned count;
 	struct scatterlist *sg;
 	struct page *page;
-	u32 len, offset;
+	u32 len;
 	int pg_index;
 	dma_addr_t addr;

 	pg_index = 0;
 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
 		len = sg->length;
-		offset = sg->offset;
 		page = sg_page(sg);
 		addr = sg_dma_address(sg);


+ 242 - 120
drivers/gpu/drm/drm_stub.c

@@ -254,81 +254,21 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

-int drm_fill_in_dev(struct drm_device *dev,
-			   const struct pci_device_id *ent,
-			   struct drm_driver *driver)
-{
-	int retcode;
-
-	INIT_LIST_HEAD(&dev->filelist);
-	INIT_LIST_HEAD(&dev->ctxlist);
-	INIT_LIST_HEAD(&dev->vmalist);
-	INIT_LIST_HEAD(&dev->maplist);
-	INIT_LIST_HEAD(&dev->vblank_event_list);
-
-	spin_lock_init(&dev->count_lock);
-	spin_lock_init(&dev->event_lock);
-	mutex_init(&dev->struct_mutex);
-	mutex_init(&dev->ctxlist_mutex);
-
-	if (drm_ht_create(&dev->map_hash, 12)) {
-		return -ENOMEM;
-	}
-
-	/* the DRM has 6 basic counters */
-	dev->counters = 6;
-	dev->types[0] = _DRM_STAT_LOCK;
-	dev->types[1] = _DRM_STAT_OPENS;
-	dev->types[2] = _DRM_STAT_CLOSES;
-	dev->types[3] = _DRM_STAT_IOCTLS;
-	dev->types[4] = _DRM_STAT_LOCKS;
-	dev->types[5] = _DRM_STAT_UNLOCKS;
-
-	dev->driver = driver;
-
-	if (dev->driver->bus->agp_init) {
-		retcode = dev->driver->bus->agp_init(dev);
-		if (retcode)
-			goto error_out_unreg;
-	}
-
-
-
-	retcode = drm_ctxbitmap_init(dev);
-	if (retcode) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto error_out_unreg;
-	}
-
-	if (driver->driver_features & DRIVER_GEM) {
-		retcode = drm_gem_init(dev);
-		if (retcode) {
-			DRM_ERROR("Cannot initialize graphics execution "
-				  "manager (GEM)\n");
-			goto error_out_unreg;
-		}
-	}
-
-	return 0;
-
-      error_out_unreg:
-	drm_lastclose(dev);
-	return retcode;
-}
-EXPORT_SYMBOL(drm_fill_in_dev);
-
-
 /**
- * Get a secondary minor number.
+ * drm_get_minor - Allocate and register new DRM minor
+ * @dev: DRM device
+ * @minor: Pointer to where new minor is stored
+ * @type: Type of minor
  *
- * \param dev device data structure
- * \param sec-minor structure to hold the assigned minor
- * \return negative number on failure.
+ * Allocate a new minor of the given type and register it. A pointer to the new
+ * minor is returned in @minor.
+ * Caller must hold the global DRM mutex.
  *
- * Search an empty entry and initialize it to the given parameters. This
- * routines assigns minor numbers to secondary heads of multi-headed cards
+ * RETURNS:
+ * 0 on success, negative error code on failure.
  */
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
+			 int type)
 {
 	struct drm_minor *new_minor;
 	int ret;
@@ -385,37 +325,48 @@ err_idr:
 	*minor = NULL;
 	return ret;
 }
-EXPORT_SYMBOL(drm_get_minor);

 /**
- * Put a secondary minor number.
+ * drm_unplug_minor - Unplug DRM minor
+ * @minor: Minor to unplug
  *
- * \param sec_minor - structure to be released
- * \return always zero
+ * Unplugs the given DRM minor but keeps the object. So after this returns,
+ * minor->dev is still valid so existing open-files can still access it to get
+ * device information from their drm_file objects.
+ * If the minor is already unplugged or if @minor is NULL, nothing is done.
+ * The global DRM mutex must be held by the caller.
  */
-int drm_put_minor(struct drm_minor **minor_p)
+static void drm_unplug_minor(struct drm_minor *minor)
 {
-	struct drm_minor *minor = *minor_p;
-
-	DRM_DEBUG("release secondary minor %d\n", minor->index);
+	if (!minor || !device_is_registered(minor->kdev))
+		return;

 #if defined(CONFIG_DEBUG_FS)
 	drm_debugfs_cleanup(minor);
 #endif

 	drm_sysfs_device_remove(minor);
-
 	idr_remove(&drm_minors_idr, minor->index);
-
-	kfree(minor);
-	*minor_p = NULL;
-	return 0;
 }
-EXPORT_SYMBOL(drm_put_minor);
-static void drm_unplug_minor(struct drm_minor *minor)
+/**
+ * drm_put_minor - Destroy DRM minor
+ * @minor: Minor to destroy
+ *
+ * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
+ * is done if @minor is NULL. It is fine to call this on already unplugged
+ * minors.
+ * The global DRM mutex must be held by the caller.
+ */
+static void drm_put_minor(struct drm_minor *minor)
 {
-	drm_sysfs_device_remove(minor);
+	if (!minor)
+		return;
+
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+	drm_unplug_minor(minor);
+	kfree(minor);
 }

 /**
@@ -427,66 +378,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
  */
 void drm_put_dev(struct drm_device *dev)
 {
-	struct drm_driver *driver;
-	struct drm_map_list *r_list, *list_temp;
-
 	DRM_DEBUG("\n");
 	DRM_DEBUG("\n");
 
 
 	if (!dev) {
 	if (!dev) {
 		DRM_ERROR("cleanup called no dev\n");
 		DRM_ERROR("cleanup called no dev\n");
 		return;
 		return;
 	}
 	}
-	driver = dev->driver;
-	drm_lastclose(dev);
+	drm_dev_unregister(dev);
+	drm_dev_free(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
-	if (dev->driver->unload)
-		dev->driver->unload(dev);
+void drm_unplug_dev(struct drm_device *dev)
+{
+	/* for a USB device */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_unplug_minor(dev->control);
+	if (dev->render)
+		drm_unplug_minor(dev->render);
+	drm_unplug_minor(dev->primary);
-	if (dev->driver->bus->agp_destroy)
-		dev->driver->bus->agp_destroy(dev);
+	mutex_lock(&drm_global_mutex);
-	drm_vblank_cleanup(dev);
+	drm_device_set_unplugged(dev);
-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-		drm_rmmap(dev, r_list->map);
-	drm_ht_remove(&dev->map_hash);
+	if (dev->open_count == 0) {
+		drm_put_dev(dev);
+	}
+	mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
-	drm_ctxbitmap_cleanup(dev);
+/**
+ * drm_dev_alloc - Allocate new drm device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+				 struct device *parent)
+{
+	struct drm_device *dev;
+	int ret;
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
-	if (dev->render)
-		drm_put_minor(&dev->render);
+	dev->dev = parent;
+	dev->driver = driver;
-	if (driver->driver_features & DRIVER_GEM)
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->vmalist);
+	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
+
+	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->event_lock);
+	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->ctxlist_mutex);
+
+	if (drm_ht_create(&dev->map_hash, 12))
+		goto err_free;
+
+	ret = drm_ctxbitmap_init(dev);
+	if (ret) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto err_ht;
+	}
+
+	if (driver->driver_features & DRIVER_GEM) {
+		ret = drm_gem_init(dev);
+		if (ret) {
+			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+			goto err_ctxbitmap;
+		}
+	}
+
+	return dev;
+
+err_ctxbitmap:
+	drm_ctxbitmap_cleanup(dev);
+err_ht:
+	drm_ht_remove(&dev->map_hash);
+err_free:
+	kfree(dev);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+/**
+ * drm_dev_free - Free DRM device
+ * @dev: DRM device to free
+ *
+ * Free a DRM device that has previously been allocated via drm_dev_alloc().
+ * You must not use kfree() instead or you will leak memory.
+ *
+ * This must not be called once the device got registered. Use drm_put_dev()
+ * instead, which then calls drm_dev_free().
+ */
+void drm_dev_free(struct drm_device *dev)
+{
+	drm_put_minor(dev->control);
+	drm_put_minor(dev->render);
+	drm_put_minor(dev->primary);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_destroy(dev);

-	drm_put_minor(&dev->primary);
+	drm_ctxbitmap_cleanup(dev);
+	drm_ht_remove(&dev->map_hash);
-	list_del(&dev->driver_item);
 	kfree(dev->devname);
 	kfree(dev);
 }
-EXPORT_SYMBOL(drm_put_dev);
+EXPORT_SYMBOL(drm_dev_free);
-void drm_unplug_dev(struct drm_device *dev)
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ *
+ * Register the DRM device @dev with the system, advertise device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
 {
-	/* for a USB device */
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_unplug_minor(dev->control);
-	if (dev->render)
-		drm_unplug_minor(dev->render);
-	drm_unplug_minor(dev->primary);
+	int ret;

 	mutex_lock(&drm_global_mutex);

-	drm_device_set_unplugged(dev);
+	if (dev->driver->bus->agp_init) {
+		ret = dev->driver->bus->agp_init(dev);
+		if (ret)
+			goto out_unlock;
+	}
-	if (dev->open_count == 0) {
-		drm_put_dev(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+		if (ret)
+			goto err_agp;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+		if (ret)
+			goto err_control_node;
+	}
+
+	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	if (ret)
+		goto err_render_node;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev, flags);
+		if (ret)
+			goto err_primary_node;
 	}
+
+	/* setup grouping for legacy outputs */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_mode_group_init_legacy_group(dev,
+				&dev->primary->mode_group);
+		if (ret)
+			goto err_unload;
+	}
+
+	list_add_tail(&dev->driver_item, &dev->driver->device_list);
+
+	ret = 0;
+	goto out_unlock;
+
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+err_primary_node:
+	drm_put_minor(dev->primary);
+err_render_node:
+	drm_put_minor(dev->render);
+err_control_node:
+	drm_put_minor(dev->control);
+err_agp:
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
+out_unlock:
 	mutex_unlock(&drm_global_mutex);
+	return ret;
 }
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_free() to free all resources.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+	struct drm_map_list *r_list, *list_temp;
+
+	drm_lastclose(dev);
+
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
+
+	drm_vblank_cleanup(dev);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+		drm_rmmap(dev, r_list->map);
+
+	drm_unplug_minor(dev->control);
+	drm_unplug_minor(dev->render);
+	drm_unplug_minor(dev->primary);
+
+	list_del(&dev->driver_item);
+}
+EXPORT_SYMBOL(drm_dev_unregister);

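The four new entry points pair up symmetrically: drm_dev_alloc() with drm_dev_free(), and drm_dev_register() with drm_dev_unregister(). The rewritten drm_put_dev() above is now nothing more than the two teardown halves in order, which is also what a driver's remove path would do by hand:

void example_remove(struct drm_device *dev)
{
	drm_dev_unregister(dev);	/* undoes drm_dev_register() */
	drm_dev_free(dev);		/* undoes drm_dev_alloc() */
}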
+ 31 - 65
drivers/gpu/drm/drm_sysfs.c

@@ -22,8 +22,8 @@
 #include <drm/drm_core.h>
 #include <drm/drmP.h>

-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)

 static struct device_type drm_sysfs_device_minor = {
 	.name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
 	drm_class = NULL;
 }

-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff.  But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
-	memset(dev, 0, sizeof(struct device));
-	return;
-}
-
 /*
  * Connector properties
  */
@@ -380,11 +366,6 @@ static struct bin_attribute edid_attr = {
  * properties (so far, connection status, dpms, mode list & edid) and
  * generate a hotplug event so userspace knows there's a new connector
  * available.
- *
- * Note:
- * This routine should only be called *once* for each registered connector.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
  */
 int drm_sysfs_connector_add(struct drm_connector *connector)
 {
@@ -394,29 +375,25 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 	int i;
 	int ret;

-	/* We shouldn't get called more than once for the same connector */
-	BUG_ON(device_is_registered(&connector->kdev));
-
-	connector->kdev.parent = &dev->primary->kdev;
-	connector->kdev.class = drm_class;
-	connector->kdev.release = drm_sysfs_device_release;
+	if (connector->kdev)
+		return 0;
+	connector->kdev = device_create(drm_class, dev->primary->kdev,
+					0, connector, "card%d-%s",
+					dev->primary->index, drm_get_connector_name(connector));
 	DRM_DEBUG("adding \"%s\" to sysfs\n",
 	DRM_DEBUG("adding \"%s\" to sysfs\n",
 		  drm_get_connector_name(connector));
 		  drm_get_connector_name(connector));
 
 
-	dev_set_name(&connector->kdev, "card%d-%s",
-		     dev->primary->index, drm_get_connector_name(connector));
-	ret = device_register(&connector->kdev);
-
-	if (ret) {
-		DRM_ERROR("failed to register connector device: %d\n", ret);
+	if (IS_ERR(connector->kdev)) {
+		DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+		ret = PTR_ERR(connector->kdev);
 		goto out;
 	}

 	/* Standard attributes */

 	for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
-		ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+		ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
 		if (ret)
 			goto err_out_files;
 	}
@@ -433,7 +410,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 		case DRM_MODE_CONNECTOR_Component:
 		case DRM_MODE_CONNECTOR_TV:
 			for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
-				ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+				ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
 				if (ret)
 					goto err_out_files;
 			}
@@ -442,7 +419,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 			break;
 	}

-	ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+	ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
 	if (ret)
 		goto err_out_files;

@@ -453,10 +430,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)

 err_out_files:
 	for (i = 0; i < opt_cnt; i++)
-		device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+		device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
 	for (i = 0; i < attr_cnt; i++)
-		device_remove_file(&connector->kdev, &connector_attrs[i]);
-	device_unregister(&connector->kdev);
+		device_remove_file(connector->kdev, &connector_attrs[i]);
+	device_unregister(connector->kdev);

 out:
 	return ret;
@@ -480,16 +457,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
 {
 	int i;

-	if (!connector->kdev.parent)
+	if (!connector->kdev)
 		return;
 	DRM_DEBUG("removing \"%s\" from sysfs\n",
 		  drm_get_connector_name(connector));

 	for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
-		device_remove_file(&connector->kdev, &connector_attrs[i]);
-	sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
-	device_unregister(&connector->kdev);
-	connector->kdev.parent = NULL;
+		device_remove_file(connector->kdev, &connector_attrs[i]);
+	sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+	device_unregister(connector->kdev);
+	connector->kdev = NULL;
 }
 EXPORT_SYMBOL(drm_sysfs_connector_remove);

@@ -508,7 +485,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)

 	DRM_DEBUG("generating hotplug event\n");

-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 }
 EXPORT_SYMBOL(drm_sysfs_hotplug_event);

@@ -523,15 +500,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
  */
 int drm_sysfs_device_add(struct drm_minor *minor)
 {
-	int err;
 	char *minor_str;

-	minor->kdev.parent = minor->dev->dev;
-
-	minor->kdev.class = drm_class;
-	minor->kdev.release = drm_sysfs_device_release;
-	minor->kdev.devt = minor->device;
-	minor->kdev.type = &drm_sysfs_device_minor;
 	if (minor->type == DRM_MINOR_CONTROL)
 		minor_str = "controlD%d";
         else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +509,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
         else
                 minor_str = "card%d";

-	dev_set_name(&minor->kdev, minor_str, minor->index);
-
-	err = device_register(&minor->kdev);
-	if (err) {
-		DRM_ERROR("device add failed: %d\n", err);
-		goto err_out;
+	minor->kdev = device_create(drm_class, minor->dev->dev,
+				    MKDEV(DRM_MAJOR, minor->index),
+				    minor, minor_str, minor->index);
+	if (IS_ERR(minor->kdev)) {
+		DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
+		return PTR_ERR(minor->kdev);
 	}
-
 	return 0;
-
-err_out:
-	return err;
 }

 /**
@@ -562,9 +528,9 @@ err_out:
  */
 void drm_sysfs_device_remove(struct drm_minor *minor)
 {
-	if (minor->kdev.parent)
-		device_unregister(&minor->kdev);
-	minor->kdev.parent = NULL;
+	if (minor->kdev)
+		device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+	minor->kdev = NULL;
 }



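The sysfs changes above replace struct device instances embedded in drm_minor and drm_connector with pointers returned by device_create(). That removes the dummy release callback (the old drm_sysfs_device_release() memset hack), because the driver core now owns the device's lifetime, and the private object is recovered through dev_get_drvdata() instead of container_of(). The minor registration reduces to a sketch like this (assuming drm_class and the minor's dev_t, as in the hunk):

	minor->kdev = device_create(drm_class, minor->dev->dev,
				    MKDEV(DRM_MAJOR, minor->index),
				    minor,		/* drvdata */
				    "card%d", minor->index);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);
	/* ... and later, the reverse: */
	device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));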
+ 6 - 51
drivers/gpu/drm/drm_usb.c

@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
 		    struct drm_driver *driver)
 {
 	struct drm_device *dev;
-	struct usb_device *usbdev;
 	int ret;

 	DRM_DEBUG("\n");

-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &interface->dev);
 	if (!dev)
 		return -ENOMEM;

-	usbdev = interface_to_usbdev(interface);
-	dev->usbdev = usbdev;
-	dev->dev = &interface->dev;
-
-	mutex_lock(&drm_global_mutex);
-
-	ret = drm_fill_in_dev(dev, NULL, driver);
-	if (ret) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g1;
-	}
-
+	dev->usbdev = interface_to_usbdev(interface);
 	usb_set_intfdata(interface, dev);
-	ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-	if (ret)
-		goto err_g1;
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g11;
-	}
-	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	ret = drm_dev_register(dev, 0);
 	if (ret)
-		goto err_g2;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, 0);
-		if (ret)
-			goto err_g3;
-	}
-
-	/* setup the grouping for the legacy output */
-	ret = drm_mode_group_init_legacy_group(dev,
-					       &dev->primary->mode_group);
-	if (ret)
-		goto err_g3;
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
-
-	mutex_unlock(&drm_global_mutex);
+		goto err_free;

 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,

 	return 0;

-err_g3:
-	drm_put_minor(&dev->primary);
-err_g2:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g11:
-	drm_put_minor(&dev->control);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;

 }

+ 1 - 1
drivers/gpu/drm/drm_vm.c

@@ -301,7 +301,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)

 	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
-	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+	page = virt_to_page((void *)dma->pagelist[page_nr]);

 	get_page(page);
 	vmf->page = page;

+ 1 - 0
drivers/gpu/drm/exynos/Kconfig

@@ -2,6 +2,7 @@ config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT

+ 0 - 1
drivers/gpu/drm/exynos/exynos_drm_drv.c

@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= exynos_drm_crtc_enable_vblank,
 	.disable_vblank		= exynos_drm_crtc_disable_vblank,
-	.gem_init_object	= exynos_drm_gem_init_object,
 	.gem_free_object	= exynos_drm_gem_free_object,
 	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
 	.dumb_create		= exynos_drm_gem_dumb_create,

+ 4 - 4
drivers/gpu/drm/exynos/exynos_drm_fimd.c

@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *	just specific driver own one instead because
 	 *	drm framework supports only one irq handler.
 	 */
-	drm_dev->irq_enabled = 1;
+	drm_dev->irq_enabled = true;

 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	drm_dev->vblank_disable_allowed = 1;
+	drm_dev->vblank_disable_allowed = true;

 	/* attach this sub driver to iommu mapping if supported. */
 	if (is_drm_iommu_supported(drm_dev))

+ 0 - 5
drivers/gpu/drm/exynos/exynos_drm_gem.c

@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
 	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
 }

-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
-	return 0;
-}
-
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;

+ 0 - 3
drivers/gpu/drm/exynos/exynos_drm_gem.h

@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 						unsigned int gem_handle,
 						struct drm_file *file_priv);
-/* initialize gem object. */
-int exynos_drm_gem_init_object(struct drm_gem_object *obj);
-
 /* free gem object. */
 void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);


+ 6 - 10
drivers/gpu/drm/exynos/exynos_drm_vidi.c

@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
 {
 	struct vidi_context *ctx = get_vidi_context(dev);
 	struct edid *edid;
-	int edid_len;

 	/*
 	 * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
 		return ERR_PTR(-EFAULT);
 	}

-	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
-	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+	edid = drm_edid_duplicate(ctx->raw_edid);
 	if (!edid) {
 		DRM_DEBUG_KMS("failed to allocate edid\n");
 		return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *	just specific driver own one instead because
 	 *	drm framework supports only one irq handler.
 	 */
-	drm_dev->irq_enabled = 1;
+	drm_dev->irq_enabled = true;

 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	drm_dev->vblank_disable_allowed = 1;
+	drm_dev->vblank_disable_allowed = true;

 	return 0;
 }
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 	struct exynos_drm_manager *manager;
 	struct exynos_drm_display_ops *display_ops;
 	struct drm_exynos_vidi_connection *vidi = data;
-	int edid_len;

 	if (!vidi) {
 		DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 			DRM_DEBUG_KMS("edid data is invalid.\n");
 			DRM_DEBUG_KMS("edid data is invalid.\n");
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
-		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
-		ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+		ctx->raw_edid = drm_edid_duplicate(raw_edid);
 		if (!ctx->raw_edid) {
 			DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
 			return -ENOMEM;

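Both exynos_drm_vidi.c hunks swap an open-coded size computation plus kmemdup() for the new drm_edid_duplicate() helper. Judging by the removed lines, the helper is presumably equivalent to:

struct edid *drm_edid_duplicate(const struct edid *edid)
{
	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
		       GFP_KERNEL);
}

i.e. it copies the base EDID block plus all extension blocks, at EDID_LENGTH (128 bytes) per block.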
+ 1 - 0
drivers/gpu/drm/gma500/Kconfig

@@ -5,6 +5,7 @@ config DRM_GMA500
 	select FB_CFB_FILLRECT
 	select FB_CFB_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
 	select ACPI_VIDEO if ACPI

+ 1 - 0
drivers/gpu/drm/gma500/cdv_device.c

@@ -634,6 +634,7 @@ const struct psb_ops cdv_chip_ops = {
 	.crtcs = 2,
 	.hdmi_mask = (1 << 0) | (1 << 1),
 	.lvds_mask = (1 << 1),
+	.sdvo_mask = (1 << 0),
 	.cursor_needs_phys = 0,
 	.sgx_offset = MRST_SGX_OFFSET,
 	.chip_setup = cdv_chip_setup,

+ 1 - 1
drivers/gpu/drm/gma500/cdv_intel_dp.c

@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
 	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
 	intel_dp->adapter.algo_data = &intel_dp->algo;
-	intel_dp->adapter.dev.parent = &connector->base.kdev;
+	intel_dp->adapter.dev.parent = connector->base.kdev;

 	if (is_edp(encoder))
 		cdv_intel_edp_panel_vdd_on(encoder);

+ 1 - 1
drivers/gpu/drm/gma500/framebuffer.c

@@ -714,7 +714,7 @@ static void psb_setup_outputs(struct drm_device *dev)
 			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
 			break;
 		case INTEL_OUTPUT_SDVO:
-			crtc_mask = ((1 << 0) | (1 << 1));
+			crtc_mask = dev_priv->ops->sdvo_mask;
 			clone_mask = (1 << INTEL_OUTPUT_SDVO);
 			break;
 		case INTEL_OUTPUT_LVDS:

+ 0 - 5
drivers/gpu/drm/gma500/gem.c

@@ -29,11 +29,6 @@
 #include <drm/drm_vma_manager.h>
 #include "psb_drv.h"

-int psb_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL;
-}
-
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);

+ 49 - 41
drivers/gpu/drm/gma500/intel_gmbus.c

@@ -51,6 +51,9 @@
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)

+#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
+#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
+
 /* Intel GPIO access functions */

 #define I2C_RISEFALL_TIME 20
@@ -71,7 +74,8 @@ struct intel_gpio {
 void
 gma_intel_i2c_reset(struct drm_device *dev)
 {
-	REG_WRITE(GMBUS0, 0);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	GMBUS_REG_WRITE(GMBUS0, 0);
 }

 static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
@@ -98,11 +102,10 @@ static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
 static u32 get_reserved(struct intel_gpio *gpio)
 {
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = 0;

 	/* On most chips, these bits must be preserved in software. */
-	reserved = REG_READ(gpio->reg) &
+	reserved = GMBUS_REG_READ(gpio->reg) &
 				     (GPIO_DATA_PULLUP_DISABLE |
 				      GPIO_CLOCK_PULLUP_DISABLE);

@@ -113,29 +116,26 @@ static int get_clock(void *data)
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
-	REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	REG_WRITE(gpio->reg, reserved);
-	return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	GMBUS_REG_WRITE(gpio->reg, reserved);
+	return (GMBUS_REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }

 static int get_data(void *data)
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
-	REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	REG_WRITE(gpio->reg, reserved);
-	return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	GMBUS_REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	GMBUS_REG_WRITE(gpio->reg, reserved);
+	return (GMBUS_REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }

 static void set_clock(void *data, int state_high)
 {
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
 	u32 reserved = get_reserved(gpio);
 	u32 clock_bits;
 	u32 clock_bits;
 
 
@@ -145,15 +145,14 @@ static void set_clock(void *data, int state_high)
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			GPIO_CLOCK_VAL_MASK;
 			GPIO_CLOCK_VAL_MASK;
 
 
-	REG_WRITE(gpio->reg, reserved | clock_bits);
-	REG_READ(gpio->reg); /* Posting */
+	GMBUS_REG_WRITE(gpio->reg, reserved | clock_bits);
+	GMBUS_REG_READ(gpio->reg); /* Posting */
 }
 }
 
 
 static void set_data(void *data, int state_high)
 static void set_data(void *data, int state_high)
 {
 {
 	struct intel_gpio *gpio = data;
 	struct intel_gpio *gpio = data;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
 	struct drm_psb_private *dev_priv = gpio->dev_priv;
-	struct drm_device *dev = dev_priv->dev;
 	u32 reserved = get_reserved(gpio);
 	u32 reserved = get_reserved(gpio);
 	u32 data_bits;
 	u32 data_bits;
 
 
@@ -163,8 +162,8 @@ static void set_data(void *data, int state_high)
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			GPIO_DATA_VAL_MASK;
 			GPIO_DATA_VAL_MASK;
 
 
-	REG_WRITE(gpio->reg, reserved | data_bits);
-	REG_READ(gpio->reg);
+	GMBUS_REG_WRITE(gpio->reg, reserved | data_bits);
+	GMBUS_REG_READ(gpio->reg);
 }
 }
 
 
 static struct i2c_adapter *
 static struct i2c_adapter *
@@ -251,7 +250,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
 					       struct intel_gmbus,
 					       struct intel_gmbus,
 					       adapter);
 					       adapter);
 	struct drm_psb_private *dev_priv = adapter->algo_data;
 	struct drm_psb_private *dev_priv = adapter->algo_data;
-	struct drm_device *dev = dev_priv->dev;
 	int i, reg_offset;
 	int i, reg_offset;
 
 
 	if (bus->force_bit)
 	if (bus->force_bit)
@@ -260,28 +258,30 @@ gmbus_xfer(struct i2c_adapter *adapter,
 
 
 	reg_offset = 0;
 	reg_offset = 0;
 
 
-	REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+	GMBUS_REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
 
 	for (i = 0; i < num; i++) {
 	for (i = 0; i < num; i++) {
 		u16 len = msgs[i].len;
 		u16 len = msgs[i].len;
 		u8 *buf = msgs[i].buf;
 		u8 *buf = msgs[i].buf;
 
 
 		if (msgs[i].flags & I2C_M_RD) {
 		if (msgs[i].flags & I2C_M_RD) {
-			REG_WRITE(GMBUS1 + reg_offset,
-				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
-				   (len << GMBUS_BYTE_COUNT_SHIFT) |
-				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
-				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
-			REG_READ(GMBUS2+reg_offset);
+			GMBUS_REG_WRITE(GMBUS1 + reg_offset,
+					GMBUS_CYCLE_WAIT |
+					(i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+					(len << GMBUS_BYTE_COUNT_SHIFT) |
+					(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+					GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+			GMBUS_REG_READ(GMBUS2+reg_offset);
 			do {
 			do {
 				u32 val, loop = 0;
 				u32 val, loop = 0;
 
 
-				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+				if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+					     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
 					goto timeout;
 					goto timeout;
-				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+				if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
 					goto clear_err;
 					goto clear_err;
 
 
-				val = REG_READ(GMBUS3 + reg_offset);
+				val = GMBUS_REG_READ(GMBUS3 + reg_offset);
 				do {
 				do {
 					*buf++ = val & 0xff;
 					*buf++ = val & 0xff;
 					val >>= 8;
 					val >>= 8;
@@ -295,18 +295,20 @@ gmbus_xfer(struct i2c_adapter *adapter,
 				val |= *buf++ << (8 * loop);
 				val |= *buf++ << (8 * loop);
 			} while (--len && ++loop < 4);
 			} while (--len && ++loop < 4);
 
 
-			REG_WRITE(GMBUS3 + reg_offset, val);
-			REG_WRITE(GMBUS1 + reg_offset,
+			GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+			GMBUS_REG_WRITE(GMBUS1 + reg_offset,
 				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
 				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
 				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
 				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
 				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
 				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
 				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
 				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
-			REG_READ(GMBUS2+reg_offset);
+			GMBUS_REG_READ(GMBUS2+reg_offset);
 
 
 			while (len) {
 			while (len) {
-				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+				if (wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) &
+					     (GMBUS_SATOER | GMBUS_HW_RDY), 50))
 					goto timeout;
 					goto timeout;
-				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+				if (GMBUS_REG_READ(GMBUS2 + reg_offset) &
+				    GMBUS_SATOER)
 					goto clear_err;
 					goto clear_err;
 
 
 				val = loop = 0;
 				val = loop = 0;
@@ -314,14 +316,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
 					val |= *buf++ << (8 * loop);
 					val |= *buf++ << (8 * loop);
 				} while (--len && ++loop < 4);
 				} while (--len && ++loop < 4);
 
 
-				REG_WRITE(GMBUS3 + reg_offset, val);
-				REG_READ(GMBUS2+reg_offset);
+				GMBUS_REG_WRITE(GMBUS3 + reg_offset, val);
+				GMBUS_REG_READ(GMBUS2+reg_offset);
 			}
 			}
 		}
 		}
 
 
-		if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+		if (i + 1 < num && wait_for(GMBUS_REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
 			goto timeout;
 			goto timeout;
-		if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+		if (GMBUS_REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
 			goto clear_err;
 			goto clear_err;
 	}
 	}
 
 
@@ -332,20 +334,20 @@ clear_err:
 	 * of resetting the GMBUS controller and so clearing the
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
 	 * BUS_ERROR raised by the slave's NAK.
 	 */
 	 */
-	REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
-	REG_WRITE(GMBUS1 + reg_offset, 0);
+	GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+	GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
 
 
 done:
 done:
 	/* Mark the GMBUS interface as disabled. We will re-enable it at the
 	/* Mark the GMBUS interface as disabled. We will re-enable it at the
 	 * start of the next xfer, till then let it sleep.
 	 * start of the next xfer, till then let it sleep.
 	 */
 	 */
-	REG_WRITE(GMBUS0 + reg_offset, 0);
+	GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
 	return i;
 	return i;
 
 
 timeout:
 timeout:
 	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
 	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
 		 bus->reg0 & 0xff, bus->adapter.name);
 		 bus->reg0 & 0xff, bus->adapter.name);
-	REG_WRITE(GMBUS0 + reg_offset, 0);
+	GMBUS_REG_WRITE(GMBUS0 + reg_offset, 0);
 
 
 	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
 	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
 	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
 	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
@@ -399,6 +401,11 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
 	if (dev_priv->gmbus == NULL)
 	if (dev_priv->gmbus == NULL)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
+	if (IS_MRST(dev))
+		dev_priv->gmbus_reg = dev_priv->aux_reg;
+	else
+		dev_priv->gmbus_reg = dev_priv->vdc_reg;
+
 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
 
 
@@ -487,6 +494,7 @@ void gma_intel_teardown_gmbus(struct drm_device *dev)
 		i2c_del_adapter(&bus->adapter);
 		i2c_del_adapter(&bus->adapter);
 	}
 	}
 
 
+	dev_priv->gmbus_reg = NULL; /* iounmap is done in driver_unload */
 	kfree(dev_priv->gmbus);
 	kfree(dev_priv->gmbus);
 	dev_priv->gmbus = NULL;
 	dev_priv->gmbus = NULL;
 }
 }
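
The change above boils down to one indirection: instead of hard-wiring every GMBUS access to the primary VDC block through REG_READ/REG_WRITE, all accesses now go through dev_priv->gmbus_reg, which gma_intel_setup_gmbus() points at either the aux block (Oaktrail/MRST) or the primary block. A minimal userspace sketch of that pattern, with plain arrays standing in for ioremapped MMIO and an assumed GMBUS0 offset:

/* Illustrative sketch only -- arrays replace ioremapped MMIO, and the
 * GMBUS0 offset is an assumption made for the example. */
#include <stdint.h>
#include <stdio.h>

#define GMBUS0 0x5100 /* assumed offset of the first GMBUS register */

static uint32_t primary_vdc[0x2000];
static uint32_t aux_vdc[0x2000];

struct dev_priv {
	uint32_t *vdc_reg;   /* primary register block */
	uint32_t *aux_reg;   /* auxiliary block (Oaktrail only) */
	uint32_t *gmbus_reg; /* whichever block carries GMBUS */
};

/* Counterparts of GMBUS_REG_READ/GMBUS_REG_WRITE: every access is
 * relative to gmbus_reg, chosen once at setup time. */
static uint32_t gmbus_read(struct dev_priv *p, uint32_t reg)
{
	return p->gmbus_reg[reg / 4];
}

static void gmbus_write(struct dev_priv *p, uint32_t reg, uint32_t val)
{
	p->gmbus_reg[reg / 4] = val;
}

int main(void)
{
	struct dev_priv p = { primary_vdc, aux_vdc, NULL };
	int is_mrst = 1; /* pretend the probe found an Oaktrail device */

	p.gmbus_reg = is_mrst ? p.aux_reg : p.vdc_reg;
	gmbus_write(&p, GMBUS0, 0); /* what gma_intel_i2c_reset() now does */
	printf("GMBUS0 read back via %s block: %u\n",
	       is_mrst ? "aux" : "primary", gmbus_read(&p, GMBUS0));
	return 0;
}

The payoff is that gmbus_xfer() itself never needs to know which chip it is running on; the base pointer carries that decision.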

+ 269 - 164
drivers/gpu/drm/gma500/oaktrail_crtc.c

@@ -26,24 +26,10 @@
 #include "gma_display.h"
 #include "power.h"
 
-struct psb_intel_range_t {
-	int min, max;
-};
-
-struct oaktrail_limit_t {
-	struct psb_intel_range_t dot, m, p1;
-};
-
-struct oaktrail_clock_t {
-	/* derived values */
-	int dot;
-	int m;
-	int p1;
-};
-
-#define MRST_LIMIT_LVDS_100L	    0
-#define MRST_LIMIT_LVDS_83	    1
-#define MRST_LIMIT_LVDS_100	    2
+#define MRST_LIMIT_LVDS_100L	0
+#define MRST_LIMIT_LVDS_83	1
+#define MRST_LIMIT_LVDS_100	2
+#define MRST_LIMIT_SDVO		3
 
 #define MRST_DOT_MIN		  19750
 #define MRST_DOT_MAX		  120000
@@ -57,21 +43,40 @@ struct oaktrail_clock_t {
 #define MRST_P1_MAX_0		    7
 #define MRST_P1_MAX_1		    8
 
-static const struct oaktrail_limit_t oaktrail_limits[] = {
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock);
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock);
+
+static const struct gma_limit_t mrst_limits[] = {
 	{			/* MRST_LIMIT_LVDS_100L */
 	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
 	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
 	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+	 .find_pll = mrst_lvds_find_best_pll,
 	 },
 	{			/* MRST_LIMIT_LVDS_83L */
 	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
 	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
 	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+	 .find_pll = mrst_lvds_find_best_pll,
 	 },
 	{			/* MRST_LIMIT_LVDS_100 */
 	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
 	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
 	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+	 .find_pll = mrst_lvds_find_best_pll,
+	 },
+	{			/* MRST_LIMIT_SDVO */
+	 .vco = {.min = 1400000, .max = 2800000},
+	 .n = {.min = 3, .max = 7},
+	 .m = {.min = 80, .max = 137},
+	 .p1 = {.min = 1, .max = 2},
+	 .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = mrst_sdvo_find_best_pll,
 	 },
 };
 
@@ -82,9 +87,10 @@ static const u32 oaktrail_m_converts[] = {
 	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
 };
 
-static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
+					    int refclk)
 {
-	const struct oaktrail_limit_t *limit = NULL;
+	const struct gma_limit_t *limit = NULL;
 	struct drm_device *dev = crtc->dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
@@ -92,45 +98,100 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
 	    || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
 		switch (dev_priv->core_freq) {
 		case 100:
-			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+			limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
 			break;
 		case 166:
-			limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+			limit = &mrst_limits[MRST_LIMIT_LVDS_83];
 			break;
 		case 200:
-			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+			limit = &mrst_limits[MRST_LIMIT_LVDS_100];
 			break;
 		}
+	} else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+		limit = &mrst_limits[MRST_LIMIT_SDVO];
 	} else {
 		limit = NULL;
-		dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+		dev_err(dev->dev, "mrst_limit Wrong display type.\n");
 	}
 
 	return limit;
 }
 
 /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+static void mrst_lvds_clock(int refclk, struct gma_clock_t *clock)
 {
 	clock->dot = (refclk * clock->m) / (14 * clock->p1);
 }
 
-static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+static void mrst_print_pll(struct gma_clock_t *clock)
 {
-	pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
-	     prefix, clock->dot, clock->m, clock->p1);
+	DRM_DEBUG_DRIVER("dotclock=%d,  m=%d, m1=%d, m2=%d, n=%d, p1=%d, p2=%d\n",
+			 clock->dot, clock->m, clock->m1, clock->m2, clock->n,
+			 clock->p1, clock->p2);
+}
+
+static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock)
+{
+	struct gma_clock_t clock;
+	u32 target_vco, actual_freq;
+	s32 freq_error, min_error = 100000;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.n = limit->n.min; clock.n <= limit->n.max;
+		     clock.n++) {
+			for (clock.p1 = limit->p1.min;
+			     clock.p1 <= limit->p1.max; clock.p1++) {
+				/* p2 value always stored in p2_slow on SDVO */
+				clock.p = clock.p1 * limit->p2.p2_slow;
+				target_vco = target * clock.p;
+
+				/* VCO will increase at this point so break */
+				if (target_vco > limit->vco.max)
+					break;
+
+				if (target_vco < limit->vco.min)
+					continue;
+
+				actual_freq = (refclk * clock.m) /
+					      (clock.n * clock.p);
+				freq_error = 10000 -
+					     ((target * 10000) / actual_freq);
+
+				if (freq_error < -min_error) {
+					/* freq_error will start to decrease at
+					   this point so break */
+					break;
+				}
+
+				if (freq_error < 0)
+					freq_error = -freq_error;
+
+				if (freq_error < min_error) {
+					min_error = freq_error;
+					*best_clock = clock;
+				}
+			}
+		}
+		if (min_error == 0)
+			break;
+	}
+
+	return min_error == 0;
 }
 
 /**
  * Returns a set of divisors for the desired target clock with the given refclk,
  * or FALSE.  Divisor values are the actual divisors for
  */
-static bool
-mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
-		struct oaktrail_clock_t *best_clock)
+static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
+				    struct drm_crtc *crtc, int target,
+				    int refclk, struct gma_clock_t *best_clock)
 {
-	struct oaktrail_clock_t clock;
-	const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+	struct gma_clock_t clock;
 	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
@@ -140,7 +201,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
 		     clock.p1++) {
 			int this_err;
 
-			oaktrail_clock(refclk, &clock);
+			mrst_lvds_clock(refclk, &clock);
 
 			this_err = abs(clock.dot - target);
 			if (this_err < err) {
@@ -149,7 +210,6 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
 			}
 		}
 	}
-	dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
 	return err != target;
 }
 
@@ -167,8 +227,10 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
+	int i;
+	int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
 
-	if (pipe == 1) {
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
 		oaktrail_crtc_hdmi_dpms(crtc, mode);
 		return;
 	}
@@ -183,35 +245,45 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		/* Enable the DPLL */
-		temp = REG_READ(map->dpll);
-		if ((temp & DPLL_VCO_ENABLE) == 0) {
-			REG_WRITE(map->dpll, temp);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
-			/* Wait for the clocks to stabilize. */
-			udelay(150);
-		}
-		/* Enable the pipe */
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_ENABLE) == 0)
-			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
-		/* Enable the plane */
-		temp = REG_READ(map->cntr);
-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
-			REG_WRITE(map->cntr,
-				  temp | DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			REG_WRITE(map->base, REG_READ(map->base));
-		}
+		for (i = 0; i <= need_aux; i++) {
+			/* Enable the DPLL */
+			temp = REG_READ_WITH_AUX(map->dpll, i);
+			if ((temp & DPLL_VCO_ENABLE) == 0) {
+				REG_WRITE_WITH_AUX(map->dpll, temp, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+				/* Wait for the clocks to stabilize. */
+				udelay(150);
+				REG_WRITE_WITH_AUX(map->dpll,
+						   temp | DPLL_VCO_ENABLE, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+				/* Wait for the clocks to stabilize. */
+				udelay(150);
+				REG_WRITE_WITH_AUX(map->dpll,
+						   temp | DPLL_VCO_ENABLE, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+				/* Wait for the clocks to stabilize. */
+				udelay(150);
+			}
+
+			/* Enable the pipe */
+			temp = REG_READ_WITH_AUX(map->conf, i);
+			if ((temp & PIPEACONF_ENABLE) == 0) {
+				REG_WRITE_WITH_AUX(map->conf,
+						   temp | PIPEACONF_ENABLE, i);
+			}
+
+			/* Enable the plane */
+			temp = REG_READ_WITH_AUX(map->cntr, i);
+			if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+				REG_WRITE_WITH_AUX(map->cntr,
+						   temp | DISPLAY_PLANE_ENABLE,
+						   i);
+				/* Flush the plane changes */
+				REG_WRITE_WITH_AUX(map->base,
					REG_READ_WITH_AUX(map->base, i), i);
+			}
 
+		}
 		gma_crtc_load_lut(crtc);
 
 		/* Give the overlay scaler a chance to enable
@@ -223,48 +295,52 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 		 * if it's on this pipe */
 		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
 
-		/* Disable the VGA plane that we never use */
-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-		/* Disable display plane */
-		temp = REG_READ(map->cntr);
-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-			REG_WRITE(map->cntr,
-				  temp & ~DISPLAY_PLANE_ENABLE);
-			/* Flush the plane changes */
-			REG_WRITE(map->base, REG_READ(map->base));
-			REG_READ(map->base);
-		}
+		for (i = 0; i <= need_aux; i++) {
+			/* Disable the VGA plane that we never use */
+			REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
+			/* Disable display plane */
+			temp = REG_READ_WITH_AUX(map->cntr, i);
+			if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+				REG_WRITE_WITH_AUX(map->cntr,
+					temp & ~DISPLAY_PLANE_ENABLE, i);
+				/* Flush the plane changes */
+				REG_WRITE_WITH_AUX(map->base,
+						   REG_READ(map->base), i);
+				REG_READ_WITH_AUX(map->base, i);
+			}
 
-		/* Next, disable display pipes */
-		temp = REG_READ(map->conf);
-		if ((temp & PIPEACONF_ENABLE) != 0) {
-			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
-			REG_READ(map->conf);
-		}
-		/* Wait for for the pipe disable to take effect. */
-		gma_wait_for_vblank(dev);
+			/* Next, disable display pipes */
+			temp = REG_READ_WITH_AUX(map->conf, i);
+			if ((temp & PIPEACONF_ENABLE) != 0) {
+				REG_WRITE_WITH_AUX(map->conf,
+						   temp & ~PIPEACONF_ENABLE, i);
+				REG_READ_WITH_AUX(map->conf, i);
+			}
+			/* Wait for for the pipe disable to take effect. */
+			gma_wait_for_vblank(dev);
+
+			temp = REG_READ_WITH_AUX(map->dpll, i);
+			if ((temp & DPLL_VCO_ENABLE) != 0) {
+				REG_WRITE_WITH_AUX(map->dpll,
+						   temp & ~DPLL_VCO_ENABLE, i);
+				REG_READ_WITH_AUX(map->dpll, i);
+			}
 
-		temp = REG_READ(map->dpll);
-		if ((temp & DPLL_VCO_ENABLE) != 0) {
-			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
-			REG_READ(map->dpll);
+			/* Wait for the clocks to turn off. */
+			udelay(150);
 		}
-
-		/* Wait for the clocks to turn off. */
-		udelay(150);
 		break;
 	}
 
-	/*Set FIFO Watermarks*/
-	REG_WRITE(DSPARB, 0x3FFF);
-	REG_WRITE(DSPFW1, 0x3F88080A);
-	REG_WRITE(DSPFW2, 0x0b060808);
+	/* Set FIFO Watermarks (values taken from EMGD) */
+	REG_WRITE(DSPARB, 0x3f80);
+	REG_WRITE(DSPFW1, 0x3f8f0404);
+	REG_WRITE(DSPFW2, 0x04040f04);
 	REG_WRITE(DSPFW3, 0x0);
-	REG_WRITE(DSPFW4, 0x08030404);
+	REG_WRITE(DSPFW4, 0x04040404);
 	REG_WRITE(DSPFW5, 0x04040404);
 	REG_WRITE(DSPFW6, 0x78);
-	REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
-	/* Must write Bit 14 of the Chicken Bit Register */
+	REG_WRITE(DSPCHICKENBIT, REG_READ(DSPCHICKENBIT) | 0xc040);
 
 	gma_power_end(dev);
 }
@@ -297,7 +373,8 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	int pipe = gma_crtc->pipe;
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	int refclk = 0;
-	struct oaktrail_clock_t clock;
+	struct gma_clock_t clock;
+	const struct gma_limit_t *limit;
 	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
 	bool ok, is_sdvo = false;
 	bool is_lvds = false;
@@ -306,8 +383,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	struct gma_encoder *gma_encoder = NULL;
 	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
 	struct drm_connector *connector;
+	int i;
+	int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
 
-	if (pipe == 1)
+	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
 		return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
 
 	if (!gma_power_begin(dev, true))
@@ -340,15 +419,17 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	}
 
 	/* Disable the VGA plane that we never use */
-	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+	for (i = 0; i <= need_aux; i++)
+		REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
 
 	/* Disable the panel fitter if it was on our pipe */
 	if (oaktrail_panel_fitter_pipe(dev) == pipe)
 		REG_WRITE(PFIT_CONTROL, 0);
 
-	REG_WRITE(map->src,
-		  ((mode->crtc_hdisplay - 1) << 16) |
-		  (mode->crtc_vdisplay - 1));
+	for (i = 0; i <= need_aux; i++) {
+		REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) |
+					     (mode->crtc_vdisplay - 1), i);
+	}
 
 	if (gma_encoder)
 		drm_object_property_get_value(&connector->base,
@@ -365,35 +446,39 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 		offsetY = (adjusted_mode->crtc_vdisplay -
 			   mode->crtc_vdisplay) / 2;
 
-		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
-			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
-			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(map->hblank,
-			(adjusted_mode->crtc_hblank_start - offsetX - 1) |
-			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
-		REG_WRITE(map->hsync,
-			(adjusted_mode->crtc_hsync_start - offsetX - 1) |
-			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
-		REG_WRITE(map->vblank,
-			(adjusted_mode->crtc_vblank_start - offsetY - 1) |
-			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
-		REG_WRITE(map->vsync,
-			(adjusted_mode->crtc_vsync_start - offsetY - 1) |
-			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+		for (i = 0; i <= need_aux; i++) {
+			REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) |
+				((adjusted_mode->crtc_htotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) |
+				((adjusted_mode->crtc_vtotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hblank,
+				(adjusted_mode->crtc_hblank_start - offsetX - 1) |
+				((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hsync,
+				(adjusted_mode->crtc_hsync_start - offsetX - 1) |
+				((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vblank,
+				(adjusted_mode->crtc_vblank_start - offsetY - 1) |
+				((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vsync,
+				(adjusted_mode->crtc_vsync_start - offsetY - 1) |
+				((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16), i);
+		}
 	} else {
-		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
-			((adjusted_mode->crtc_htotal - 1) << 16));
-		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
-			((adjusted_mode->crtc_vtotal - 1) << 16));
-		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
-			((adjusted_mode->crtc_hblank_end - 1) << 16));
-		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
-			((adjusted_mode->crtc_hsync_end - 1) << 16));
-		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
-			((adjusted_mode->crtc_vblank_end - 1) << 16));
-		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
-			((adjusted_mode->crtc_vsync_end - 1) << 16));
+		for (i = 0; i <= need_aux; i++) {
+			REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+				((adjusted_mode->crtc_htotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+				((adjusted_mode->crtc_vtotal - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+				((adjusted_mode->crtc_hblank_end - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+				((adjusted_mode->crtc_hsync_end - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+				((adjusted_mode->crtc_vblank_end - 1) << 16), i);
+			REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+				((adjusted_mode->crtc_vsync_end - 1) << 16), i);
+		}
 	}
 
 	/* Flush the plane changes */
@@ -418,21 +503,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_mipi)
 		goto oaktrail_crtc_mode_set_exit;
 
-	refclk = dev_priv->core_freq * 1000;
 
 	dpll = 0;		/*BIT16 = 0 for 100MHz reference */
 
-	ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+	refclk = is_sdvo ? 96000 : dev_priv->core_freq * 1000;
+	limit = mrst_limit(crtc, refclk);
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
+			     refclk, &clock);
 
-	if (!ok) {
-		dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
-	} else {
-		dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
-			 "m = %x, p1 = %x.\n", clock.dot, clock.m,
-			 clock.p1);
+	if (is_sdvo) {
+		/* Convert calculated values to register values */
+		clock.p1 = (1L << (clock.p1 - 1));
+		clock.m -= 2;
+		clock.n = (1L << (clock.n - 1));
 	}
 
-	fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+	if (!ok)
+		DRM_ERROR("Failed to find proper PLL settings");
+
+	mrst_print_pll(&clock);
+
+	if (is_sdvo)
+		fp = clock.n << 16 | clock.m;
+	else
+		fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
 
 	dpll |= DPLL_VGA_MODE_DIS;
 
@@ -456,38 +550,43 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 
 
 	/* compute bitmask from p1 value */
-	dpll |= (1 << (clock.p1 - 2)) << 17;
+	if (is_sdvo)
+		dpll |= clock.p1 << 16; // dpll |= (1 << (clock.p1 - 1)) << 16;
+	else
+		dpll |= (1 << (clock.p1 - 2)) << 17;
 
 	dpll |= DPLL_VCO_ENABLE;
 
-	mrstPrintPll("chosen", &clock);
-
 	if (dpll & DPLL_VCO_ENABLE) {
-		REG_WRITE(map->fp0, fp);
-		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
-		REG_READ(map->dpll);
-		/* Check the DPLLA lock bit PIPEACONF[29] */
-		udelay(150);
+		for (i = 0; i <= need_aux; i++) {
+			REG_WRITE_WITH_AUX(map->fp0, fp, i);
+			REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i);
+			REG_READ_WITH_AUX(map->dpll, i);
+			/* Check the DPLLA lock bit PIPEACONF[29] */
+			udelay(150);
+		}
 	}
 
-	REG_WRITE(map->fp0, fp);
-	REG_WRITE(map->dpll, dpll);
-	REG_READ(map->dpll);
-	/* Wait for the clocks to stabilize. */
-	udelay(150);
+	for (i = 0; i <= need_aux; i++) {
+		REG_WRITE_WITH_AUX(map->fp0, fp, i);
+		REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+		REG_READ_WITH_AUX(map->dpll, i);
+		/* Wait for the clocks to stabilize. */
+		udelay(150);
 
-	/* write it again -- the BIOS does, after all */
-	REG_WRITE(map->dpll, dpll);
-	REG_READ(map->dpll);
-	/* Wait for the clocks to stabilize. */
-	udelay(150);
+		/* write it again -- the BIOS does, after all */
+		REG_WRITE_WITH_AUX(map->dpll, dpll, i);
+		REG_READ_WITH_AUX(map->dpll, i);
+		/* Wait for the clocks to stabilize. */
+		udelay(150);
 
-	REG_WRITE(map->conf, pipeconf);
-	REG_READ(map->conf);
-	gma_wait_for_vblank(dev);
+		REG_WRITE_WITH_AUX(map->conf, pipeconf, i);
+		REG_READ_WITH_AUX(map->conf, i);
+		gma_wait_for_vblank(dev);
 
-	REG_WRITE(map->cntr, dspcntr);
-	gma_wait_for_vblank(dev);
+		REG_WRITE_WITH_AUX(map->cntr, dspcntr, i);
+		gma_wait_for_vblank(dev);
+	}
 
 oaktrail_crtc_mode_set_exit:
 	gma_power_end(dev);
@@ -565,3 +664,9 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
 	.commit = gma_crtc_commit,
 };
 
+/* Not used yet */
+const struct gma_clock_funcs mrst_clock_funcs = {
+	.clock = mrst_lvds_clock,
+	.limit = mrst_limit,
+	.pll_is_valid = gma_pll_is_valid,
+};
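
The new SDVO PLL search above is self-contained arithmetic, so it can be exercised outside the driver. The sketch below re-creates the loop structure with the MRST_LIMIT_SDVO bounds (VCO 1400000-2800000, n 3-7, m 80-137, p1 1-2, fixed p2 of 10) and the 96000 kHz SDVO refclk; the 108000 kHz target is an assumed example value, and the driver's early-break heuristics are dropped for clarity:

/* Standalone re-creation of the mrst_sdvo_find_best_pll divisor
 * search; compile and run as a normal C program. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const long refclk = 96000;   /* kHz, SDVO reference clock */
	const long target = 108000;  /* kHz, assumed example dotclock */
	const long p2 = 10;          /* fixed post divider (p2_slow) */
	long best_m = 0, best_n = 0, best_p1 = 0;
	long min_error = 100000;     /* relative error, 1/10000 units */

	for (long m = 80; m <= 137; m++)
		for (long n = 3; n <= 7; n++)
			for (long p1 = 1; p1 <= 2; p1++) {
				long p = p1 * p2;
				long vco = target * p;

				/* reject divisor sets whose VCO is out of range */
				if (vco < 1400000 || vco > 2800000)
					continue;

				long actual = refclk * m / (n * p);
				long err = labs(10000 - target * 10000 / actual);

				if (err < min_error) {
					min_error = err;
					best_m = m;
					best_n = n;
					best_p1 = p1;
				}
			}

	printf("m=%ld n=%ld p1=%ld, error=%ld/10000\n",
	       best_m, best_n, best_p1, min_error);
	return 0;
}

For this target the search lands on an exact divisor set (error 0), which is why the driver can afford to bail out of the outer loop as soon as min_error reaches zero.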

+ 6 - 0
drivers/gpu/drm/gma500/oaktrail_device.c

@@ -40,6 +40,9 @@ static int oaktrail_output_init(struct drm_device *dev)
 		dev_err(dev->dev, "DSI is not supported\n");
 	if (dev_priv->hdmi_priv)
 		oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+
+	psb_intel_sdvo_init(dev, SDVOB);
+
 	return 0;
 }
 
@@ -526,6 +529,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
 		psb_intel_opregion_init(dev);
 		psb_intel_init_bios(dev);
 	}
+	gma_intel_setup_gmbus(dev);
 	oaktrail_hdmi_setup(dev);
 	return 0;
 }
@@ -534,6 +538,7 @@ static void oaktrail_teardown(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 
+	gma_intel_teardown_gmbus(dev);
 	oaktrail_hdmi_teardown(dev);
 	if (!dev_priv->has_gct)
 		psb_intel_destroy_bios(dev);
@@ -546,6 +551,7 @@ const struct psb_ops oaktrail_chip_ops = {
 	.crtcs = 2,
 	.hdmi_mask = (1 << 1),
 	.lvds_mask = (1 << 0),
+	.sdvo_mask = (1 << 1),
 	.cursor_needs_phys = 0,
 	.sgx_offset = MRST_SGX_OFFSET,
 

+ 3 - 27
drivers/gpu/drm/gma500/oaktrail_lvds.c

@@ -218,30 +218,6 @@ static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
 	.commit = oaktrail_lvds_commit,
 };
 
-static struct drm_display_mode lvds_configuration_modes[] = {
-	/* hard coded fixed mode for TPO LTPS LPJ040K001A */
-	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
-		   846, 1056, 0, 480, 489, 491, 525, 0, 0) },
-	/* hard coded fixed mode for LVDS 800x480 */
-	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
-		   802, 1024, 0, 480, 481, 482, 525, 0, 0) },
-	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
-		   1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
-	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
-		   1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
-	/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
-		   1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
-	/* hard coded fixed mode for LVDS 1024x768 */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
-		   1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
-	/* hard coded fixed mode for LVDS 1366x768 */
-	{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
-		   1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
-};
-
 /* Returns the panel fixed mode from configuration. */
 
 static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
@@ -303,10 +279,10 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
 			mode_dev->panel_fixed_mode =
 				drm_mode_duplicate(dev,
 					dev_priv->lfp_lvds_vbt_mode);
-	/* Then guess */
+
+	/* If we still got no mode then bail */
 	if (mode_dev->panel_fixed_mode == NULL)
-		mode_dev->panel_fixed_mode
-			= drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+		return;
 
 	drm_mode_set_name(mode_dev->panel_fixed_mode);
 	drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);

+ 1 - 0
drivers/gpu/drm/gma500/psb_device.c

@@ -373,6 +373,7 @@ const struct psb_ops psb_chip_ops = {
 	.crtcs = 2,
 	.hdmi_mask = (1 << 0),
 	.lvds_mask = (1 << 1),
+	.sdvo_mask = (1 << 0),
 	.cursor_needs_phys = 1,
 	.sgx_offset = PSB_SGX_OFFSET,
 	.chip_setup = psb_chip_setup,

+ 34 - 5
drivers/gpu/drm/gma500/psb_drv.c

@@ -251,6 +251,12 @@ static int psb_driver_unload(struct drm_device *dev)
 			iounmap(dev_priv->sgx_reg);
 			dev_priv->sgx_reg = NULL;
 		}
+		if (dev_priv->aux_reg) {
+			iounmap(dev_priv->aux_reg);
+			dev_priv->aux_reg = NULL;
+		}
+		if (dev_priv->aux_pdev)
+			pci_dev_put(dev_priv->aux_pdev);
 
 		/* Destroy VBT data */
 		psb_intel_destroy_bios(dev);
@@ -266,7 +272,7 @@ static int psb_driver_unload(struct drm_device *dev)
 static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct drm_psb_private *dev_priv;
-	unsigned long resource_start;
+	unsigned long resource_start, resource_len;
 	unsigned long irqflags;
 	int ret = -ENOMEM;
 	struct drm_connector *connector;
@@ -296,6 +302,30 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (!dev_priv->sgx_reg)
 		goto out_err;
 
+	if (IS_MRST(dev)) {
+		dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
+
+		if (dev_priv->aux_pdev) {
+			resource_start = pci_resource_start(dev_priv->aux_pdev,
+							    PSB_AUX_RESOURCE);
+			resource_len = pci_resource_len(dev_priv->aux_pdev,
+							PSB_AUX_RESOURCE);
+			dev_priv->aux_reg = ioremap_nocache(resource_start,
+							    resource_len);
+			if (!dev_priv->aux_reg)
+				goto out_err;
+
+			DRM_DEBUG_KMS("Found aux vdc");
+		} else {
+			/* Couldn't find the aux vdc so map to primary vdc */
+			dev_priv->aux_reg = dev_priv->vdc_reg;
+			DRM_DEBUG_KMS("Couldn't find aux pci device");
+		}
+		dev_priv->gmbus_reg = dev_priv->aux_reg;
+	} else {
+		dev_priv->gmbus_reg = dev_priv->vdc_reg;
+	}
+
 	psb_intel_opregion_setup(dev);
 
 	ret = dev_priv->ops->chip_setup(dev);
@@ -359,7 +389,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	drm_irq_install(dev);
 
-	dev->vblank_disable_allowed = 1;
+	dev->vblank_disable_allowed = true;
 
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
@@ -449,7 +479,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
 	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
 		dev_dbg(dev->dev, "Invalid Connector object.\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	connector = obj_to_connector(obj);
@@ -491,7 +521,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
 		obj = drm_mode_object_find(dev, obj_id,
 					DRM_MODE_OBJECT_CONNECTOR);
 		if (!obj) {
-			ret = -EINVAL;
+			ret = -ENOENT;
 			goto mode_op_out;
 		}
 
@@ -646,7 +676,6 @@ static struct drm_driver driver = {
 	.preclose = psb_driver_preclose,
 	.postclose = psb_driver_close,
 
-	.gem_init_object = psb_gem_init_object,
 	.gem_free_object = psb_gem_free_object,
 	.gem_vm_ops = &psb_gem_vm_ops,
 	.dumb_create = psb_gem_dumb_create,
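
One detail worth noting in psb_driver_load() above: when the companion PCI function is missing, aux_reg is aliased to vdc_reg rather than left NULL, so later aux accesses quietly degrade to primary accesses with no extra checks. A toy model of that fallback, with arrays in place of ioremapped BARs and a stubbed probe standing in for pci_get_bus_and_slot() plus ioremap_nocache():

/* Toy model of the aux-VDC fallback; illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t vdc_block[64], aux_block[64];

static uint32_t *probe_aux_vdc(int present)
{
	return present ? aux_block : NULL;
}

int main(void)
{
	uint32_t *vdc_reg = vdc_block;
	uint32_t *aux_reg = probe_aux_vdc(0); /* pretend the probe failed */

	if (!aux_reg)
		aux_reg = vdc_reg; /* alias aux accesses onto the primary block */

	uint32_t *gmbus_reg = aux_reg; /* MRST keeps GMBUS behind the aux base */

	gmbus_reg[0] = 0x1234;
	printf("write through gmbus_reg landed in %s block\n",
	       aux_reg == vdc_reg ? "the primary" : "a separate aux");
	return 0;
}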

+ 52 - 6
drivers/gpu/drm/gma500/psb_drv.h

@@ -44,10 +44,10 @@ enum {
 	CHIP_MFLD_0130 = 3,		/* Medfield */
 };
 
-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
 
 /*
  * Driver definitions
@@ -75,6 +75,7 @@ enum {
 *	PCI resource identifiers
  */
 #define PSB_MMIO_RESOURCE	 0
+#define PSB_AUX_RESOURCE	 0
 #define PSB_GATT_RESOURCE	 2
 #define PSB_GTT_RESOURCE	 3
 /*
@@ -455,6 +456,7 @@ struct psb_ops;
 
 struct drm_psb_private {
 	struct drm_device *dev;
+	struct pci_dev *aux_pdev; /* Currently only used by mrst */
 	const struct psb_ops *ops;
 	const struct psb_offset *regmap;
 	
@@ -486,6 +488,7 @@ struct drm_psb_private {
 
 	uint8_t __iomem *sgx_reg;
 	uint8_t __iomem *vdc_reg;
+	uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
 	uint32_t gatt_free_offset;
 
 	/*
@@ -532,6 +535,7 @@ struct drm_psb_private {
 
 	/* gmbus */
 	struct intel_gmbus *gmbus;
+	uint8_t __iomem *gmbus_reg;
 
 	/* Used by SDVO */
 	int crt_ddc_pin;
@@ -672,6 +676,7 @@ struct psb_ops {
 	int sgx_offset;		/* Base offset of SGX device */
 	int hdmi_mask;		/* Mask of HDMI CRTCs */
 	int lvds_mask;		/* Mask of LVDS CRTCs */
+	int sdvo_mask;		/* Mask of SDVO CRTCs */
 	int cursor_needs_phys;  /* If cursor base reg need physical address */
 
 	/* Sub functions */
@@ -837,7 +842,6 @@ extern const struct drm_connector_helper_funcs
 extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
 
 /* gem.c */
-extern int psb_gem_init_object(struct drm_gem_object *obj);
 extern void psb_gem_free_object(struct drm_gem_object *obj);
 extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
 			struct drm_file *file);
@@ -928,16 +932,58 @@ static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
 	return ioread32(dev_priv->vdc_reg + reg);
 }
 
+static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	return ioread32(dev_priv->aux_reg + reg);
+}
+
 #define REG_READ(reg)	       REGISTER_READ(dev, (reg))
+#define REG_READ_AUX(reg)      REGISTER_READ_AUX(dev, (reg))
+
+/* Useful for post reads */
+static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
+					      uint32_t reg, int aux)
+{
+	uint32_t val;
+
+	if (aux)
+		val = REG_READ_AUX(reg);
+	else
+		val = REG_READ(reg);
+
+	return val;
+}
+
+#define REG_READ_WITH_AUX(reg, aux) REGISTER_READ_WITH_AUX(dev, (reg), (aux))
 
 static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
-				      uint32_t val)
+				  uint32_t val)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	iowrite32((val), dev_priv->vdc_reg + (reg));
 }
 
+static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
+				      uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	iowrite32((val), dev_priv->aux_reg + (reg));
+}
+
 #define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
+#define REG_WRITE_AUX(reg, val)	REGISTER_WRITE_AUX(dev, (reg), (val))
+
+static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
+				      uint32_t val, int aux)
+{
+	if (aux)
+		REG_WRITE_AUX(reg, val);
+	else
+		REG_WRITE(reg, val);
+}
+
+#define REG_WRITE_WITH_AUX(reg, val, aux) REGISTER_WRITE_WITH_AUX(dev, (reg), (val), (aux))
 
 static inline void REGISTER_WRITE16(struct drm_device *dev,
 					uint32_t reg, uint32_t val)
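
The WITH_AUX helpers above exist so one loop can program the same register offset into both the primary and auxiliary blocks; need_aux simply turns a single-pass loop into a two-pass one. A compilable toy version of the dispatch, with arrays standing in for the two iomapped regions and an assumed DPLL index and enable bit:

/* Toy model of REG_READ_WITH_AUX/REG_WRITE_WITH_AUX -- illustrative
 * only; the register index and bit value are assumptions. */
#include <stdint.h>
#include <stdio.h>

static uint32_t vdc[256], aux[256];

static uint32_t read_with_aux(uint32_t reg, int use_aux)
{
	return use_aux ? aux[reg] : vdc[reg];
}

static void write_with_aux(uint32_t reg, uint32_t val, int use_aux)
{
	if (use_aux)
		aux[reg] = val;
	else
		vdc[reg] = val;
}

int main(void)
{
	const uint32_t DPLL = 7;                /* assumed register index */
	const uint32_t DPLL_VCO_ENABLE = 1u << 31;
	int need_aux = 1; /* e.g. the CRTC drives an SDVO output on MRST */

	/* The i == 0 pass touches the primary block, i == 1 the aux one,
	 * mirroring the for (i = 0; i <= need_aux; i++) loops above. */
	for (int i = 0; i <= need_aux; i++) {
		uint32_t temp = read_with_aux(DPLL, i);
		write_with_aux(DPLL, temp | DPLL_VCO_ENABLE, i);
	}

	printf("vdc=%#x aux=%#x\n", vdc[DPLL], aux[DPLL]);
	return 0;
}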

+ 1 - 1
drivers/gpu/drm/gma500/psb_intel_display.c

@@ -572,7 +572,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 
 	if (!drmmode_obj) {
 		dev_err(dev->dev, "no such CRTC id\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));

+ 38 - 21
drivers/gpu/drm/gma500/psb_intel_sdvo.c

@@ -228,24 +228,26 @@ static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u3
 {
 	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
 	u32 bval = val, cval = val;
-	int i;
+	int i, j;
+	int need_aux = IS_MRST(dev) ? 1 : 0;
 
-	if (psb_intel_sdvo->sdvo_reg == SDVOB) {
-		cval = REG_READ(SDVOC);
-	} else {
-		bval = REG_READ(SDVOB);
-	}
-	/*
-	 * Write the registers twice for luck. Sometimes,
-	 * writing them only once doesn't appear to 'stick'.
-	 * The BIOS does this too. Yay, magic
-	 */
-	for (i = 0; i < 2; i++)
-	{
-		REG_WRITE(SDVOB, bval);
-		REG_READ(SDVOB);
-		REG_WRITE(SDVOC, cval);
-		REG_READ(SDVOC);
+	for (j = 0; j <= need_aux; j++) {
+		if (psb_intel_sdvo->sdvo_reg == SDVOB)
+			cval = REG_READ_WITH_AUX(SDVOC, j);
+		else
+			bval = REG_READ_WITH_AUX(SDVOB, j);
+
+		/*
+		* Write the registers twice for luck. Sometimes,
+		* writing them only once doesn't appear to 'stick'.
+		* The BIOS does this too. Yay, magic
+		*/
+		for (i = 0; i < 2; i++) {
+			REG_WRITE_WITH_AUX(SDVOB, bval, j);
+			REG_READ_WITH_AUX(SDVOB, j);
+			REG_WRITE_WITH_AUX(SDVOC, cval, j);
+			REG_READ_WITH_AUX(SDVOC, j);
+		}
 	}
 }
 
@@ -995,6 +997,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
 	struct psb_intel_sdvo_dtd input_dtd;
 	int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
 	int rate;
+	int need_aux = IS_MRST(dev) ? 1 : 0;
 
 	if (!mode)
 		return;
@@ -1060,7 +1063,11 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
 		return;
 
 	/* Set the SDVO control regs. */
-	sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+	if (need_aux)
+		sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+	else
+		sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+
 	switch (psb_intel_sdvo->sdvo_reg) {
 	case SDVOB:
 		sdvox &= SDVOB_PRESERVE_MASK;
@@ -1090,6 +1097,8 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 	struct drm_device *dev = encoder->dev;
 	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
 	u32 temp;
+	int i;
+	int need_aux = IS_MRST(dev) ? 1 : 0;
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -1108,19 +1117,27 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
 
 		if (mode == DRM_MODE_DPMS_OFF) {
-			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+			if (need_aux)
+				temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+			else
+				temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
 			if ((temp & SDVO_ENABLE) != 0) {
 				psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
 			}
 		}
 	} else {
 		bool input1, input2;
-		int i;
 		u8 status;
 
-		temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+		if (need_aux)
+			temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
+		else
+			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+
 		if ((temp & SDVO_ENABLE) == 0)
 			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+
 		for (i = 0; i < 2; i++)
 			gma_wait_for_vblank(dev);
 

+ 11 - 11
drivers/gpu/drm/gma500/psb_irq.c

@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
 
 	if (gma_power_is_on(dev))
 		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
 	/* FIXME: Handle Medfield irq mask
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
 	*/
 
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
 
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 
 	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
 {
 	unsigned int cur_vblank;
 	int ret = 0;
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
 		    (((cur_vblank = atomic_read(counter))
 		      - *sequence) <= (1 << 23)));
 	*sequence = cur_vblank;
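
The DRM_WAIT_ON condition above is a wrap-safe sequence comparison: psb_driver_load() caps max_vblank_count at 0xffffff, so treating any difference of at most 2^23 (half the counter space) as "reached" stays correct even when the frame counter wraps. A small standalone check of the idea; the explicit 24-bit mask is added here only because a plain unsigned int would otherwise wrap at 2^32 instead:

/* Wrap-safe "has the counter reached seq?" test for a 24-bit counter. */
#include <stdio.h>

static int vblank_passed(unsigned int cur, unsigned int seq)
{
	return ((cur - seq) & 0xffffff) <= (1u << 23);
}

int main(void)
{
	/* counter wrapped from 0xfffffe past zero to 0x000002 */
	printf("%d\n", vblank_passed(0x000002, 0xfffffe)); /* 1: passed */
	printf("%d\n", vblank_passed(0xfffffe, 0x000002)); /* 0: not yet */
	return 0;
}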

+ 3 - 0
drivers/gpu/drm/i2c/tda998x_drv.c

@@ -17,6 +17,7 @@
 
 
 
+#include <linux/hdmi.h>
 #include <linux/module.h>
 
 #include <drm/drmP.h>
@@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	buf[HB(0)] = 0x82;
 	buf[HB(1)] = 0x02;
 	buf[HB(2)] = 13;
+	buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+	buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
 	buf[PB(4)] = drm_match_cea_mode(mode);
 
 	tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
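
For context, the two added bytes sit in the AVI InfoFrame payload: PB1 carries the scan mode in its low two bits and PB3 carries the RGB quantization range in bits 3:2, which is why the value is shifted left by two. A minimal sketch with the relevant enum values from linux/hdmi.h written out as plain constants:

/* Sketch of the two AVI InfoFrame payload bytes set above. */
#include <stdint.h>
#include <stdio.h>

#define HDMI_SCAN_MODE_UNDERSCAN     2 /* S1:S0 field, PB1 bits 1:0 */
#define HDMI_QUANTIZATION_RANGE_FULL 2 /* Q1:Q0 field, PB3 bits 3:2 */

int main(void)
{
	uint8_t pb[14] = {0};

	pb[1] = HDMI_SCAN_MODE_UNDERSCAN;
	pb[3] = HDMI_QUANTIZATION_RANGE_FULL << 2;

	printf("PB1=0x%02x PB3=0x%02x\n", pb[1], pb[3]);
	return 0;
}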

+ 0 - 11
drivers/gpu/drm/i810/i810_dma.c

@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
 				 dma->buflist[vertex->idx],
 				 vertex->discard, vertex->used);
 
-	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
 	sarea_priv->last_enqueue = dev_priv->counter - 1;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
 	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
 			     mc->last_render);
 
-	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
 	sarea_priv->last_enqueue = dev_priv->counter - 1;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
 
 int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
-	/* i810 has 4 more counters */
-	dev->counters += 4;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-	dev->types[9] = _DRM_STAT_DMA;
-
 	pci_set_master(dev->pdev);
 
 	return 0;

+ 67 - 0
drivers/gpu/drm/i915/Kconfig

@@ -0,0 +1,67 @@
+config DRM_I915
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
+	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
+	select SHMEM
+	select TMPFS
+	select DRM_KMS_HELPER
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
+	help
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915.  AGP support
+	  is required for this driver to work. This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
+
+config DRM_I915_KMS
+	bool "Enable modesetting on intel by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you want kernel modesetting enabled by default,
+	  and you have a new enough userspace to support this. Running old
+	  userspaces with this enabled will cause pain.  Note that this causes
+	  the driver to bind to PCI devices, which precludes loading things
+	  like intelfb.
+
+config DRM_I915_FBDEV
+	bool "Enable legacy fbdev support for the modesetting intel driver"
+	depends on DRM_I915
+	select DRM_KMS_FB_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the linux console
+	  support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default.  You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
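
(The last option only flips the compiled-in default of the i915.preliminary_hw_support module option named in its help text. A hedged sketch of the usual Kconfig-default-feeds-module-param pattern; the variable name here is illustrative, not the exact driver code:)

#include <linux/kconfig.h>
#include <linux/module.h>

/* Sketch: the Kconfig bool sets the default; the option can still be
 * overridden at load time, e.g. i915.preliminary_hw_support=1. */
static int preliminary_hw_support =
	IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param(preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
		 "Enable preliminary hardware support by default");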

+ 5 - 1
drivers/gpu/drm/i915/Makefile

@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
+	  intel_dsi.o \
+	  intel_dsi_cmd.o \
+	  intel_dsi_pll.o \
 	  intel_bios.o \
 	  intel_ddi.o \
 	  intel_dp.o \
@@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_panel.o \
 	  intel_pm.o \
 	  intel_i2c.o \
-	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
@@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 
 i915-$(CONFIG_ACPI)	+= intel_acpi.o
 
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)

+ 0 - 11
drivers/gpu/drm/i915/dvo.h

@@ -76,17 +76,6 @@ struct intel_dvo_dev_ops {
 	int (*mode_valid)(struct intel_dvo_device *dvo,
 			  struct drm_display_mode *mode);
 
-	/*
-	 * Callback to adjust the mode to be set in the CRTC.
-	 *
-	 * This allows an output to adjust the clock or even the entire set of
-	 * timings, which is used for panels with fixed timings or for
-	 * buses with clock limitations.
-	 */
-	bool (*mode_fixup)(struct intel_dvo_device *dvo,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
-
 	/*
 	 * Callback for preparing mode changes on an output
 	 */
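
(mode_fixup was an optional hook, so dropping it is safe as long as every call site guards the pointer before calling; the i915 call sites themselves are outside this diff. A generic, hypothetical sketch of that guarded-call pattern:)

#include <stdbool.h>

struct widget_ops {
	/* optional: callers simply skip hooks the backend leaves NULL */
	bool (*mode_fixup)(void *ctx, int *mode);
};

static bool widget_fixup(const struct widget_ops *ops, void *ctx, int *mode)
{
	if (!ops->mode_fixup)
		return true;	/* nothing to adjust */

	return ops->mode_fixup(ctx, mode);
}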

+ 1131 - 286
drivers/gpu/drm/i915/i915_debugfs.c

@@ -27,6 +27,8 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -38,9 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DRM_I915_RING_DEBUG 1
-
-
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
 	return v ? "yes" : "no";
 }
 
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
 static int i915_capabilities(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
 
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+	seq_putc(m, ' ');
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -554,7 +586,53 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	if (IS_VALLEYVIEW(dev)) {
+	if (INTEL_INFO(dev)->gen >= 8) {
+		int i;
+		seq_printf(m, "Master Interrupt Control:\t%08x\n",
+			   I915_READ(GEN8_MASTER_IRQ));
+
+		for (i = 0; i < 4; i++) {
+			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IMR(i)));
+			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IIR(i)));
+			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
+				   i, I915_READ(GEN8_GT_IER(i)));
+		}
+
+		for_each_pipe(i) {
+			seq_printf(m, "Pipe %c IMR:\t%08x\n",
+				   pipe_name(i),
+				   I915_READ(GEN8_DE_PIPE_IMR(i)));
+			seq_printf(m, "Pipe %c IIR:\t%08x\n",
+				   pipe_name(i),
+				   I915_READ(GEN8_DE_PIPE_IIR(i)));
+			seq_printf(m, "Pipe %c IER:\t%08x\n",
+				   pipe_name(i),
+				   I915_READ(GEN8_DE_PIPE_IER(i)));
+		}
+
+		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IMR));
+		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IIR));
+		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_DE_PORT_IER));
+
+		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IMR));
+		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IIR));
+		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_DE_MISC_IER));
+
+		seq_printf(m, "PCU interrupt mask:\t%08x\n",
+			   I915_READ(GEN8_PCU_IMR));
+		seq_printf(m, "PCU interrupt identity:\t%08x\n",
+			   I915_READ(GEN8_PCU_IIR));
+		seq_printf(m, "PCU interrupt enable:\t%08x\n",
+			   I915_READ(GEN8_PCU_IER));
+	} else if (IS_VALLEYVIEW(dev)) {
 		seq_printf(m, "Display IER:\t%08x\n",
 			   I915_READ(VLV_IER));
 		seq_printf(m, "Display IIR:\t%08x\n",
@@ -626,7 +704,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
 	for_each_ring(ring, dev_priv, i) {
-		if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
 				   ring->name, I915_READ_IMR(ring));
@@ -843,6 +921,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1401,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		return 0;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1395,12 +1477,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_fbdev *ifbdev;
+	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
 	if (ret)
 		return ret;
 
@@ -1416,10 +1498,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
 	mutex_unlock(&dev->mode_config.mutex);
+#endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
-		if (&fb->base == ifbdev->helper.fb)
+		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1525,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1544,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, '\n');
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->default_context) {
-			seq_printf(m, "HW default context %s ring ", ring->name);
-			describe_obj(m, ring->default_context->obj);
-			seq_putc(m, '\n');
-		}
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		seq_puts(m, "HW context ");
+		describe_ctx(m, ctx);
+		for_each_ring(ring, dev_priv, i)
+			if (ring->default_context == ctx)
+				seq_printf(m, "(default context %s) ", ring->name);
+
+		describe_obj(m, ctx->obj);
+		seq_putc(m, '\n');
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
@@ -1536,7 +1623,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 			   I915_READ16(C0DRB3));
 		seq_printf(m, "C1DRB3 = 0x%04x\n",
 			   I915_READ16(C1DRB3));
-	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
 			   I915_READ(MAD_DIMM_C0));
 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -1545,8 +1632,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 			   I915_READ(MAD_DIMM_C2));
 		seq_printf(m, "TILECTL = 0x%08x\n",
 			   I915_READ(TILECTL));
-		seq_printf(m, "ARB_MODE = 0x%08x\n",
-			   I915_READ(ARB_MODE));
+		if (IS_GEN8(dev))
+			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
+				   I915_READ(GAMTARBMODE));
+		else
+			seq_printf(m, "ARB_MODE = 0x%08x\n",
+				   I915_READ(ARB_MODE));
 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
 			   I915_READ(DISP_ARB_CTL));
 	}
@@ -1555,18 +1646,37 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_ppgtt_info(struct seq_file *m, void *data)
+static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	int i, ret;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int unused, i;
 
+	if (!ppgtt)
+		return;
+
+	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
+	seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
+	for_each_ring(ring, dev_priv, unused) {
+		seq_printf(m, "%s\n", ring->name);
+		for (i = 0; i < 4; i++) {
+			u32 offset = 0x270 + i * 8;
+			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
+			pdp <<= 32;
+			pdp |= I915_READ(ring->mmio_base + offset);
+			for (i = 0; i < 4; i++)
+				seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
+		}
+	}
+}
+
+static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
@@ -1585,6 +1695,22 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	int ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		gen8_ppgtt_info(m, dev);
+	else if (INTEL_INFO(dev)->gen >= 6)
+		gen6_ppgtt_info(m, dev);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -1610,27 +1736,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
 	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
 	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
 
 	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
 	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
 
 	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
 	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
 
 	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
 	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1655,126 +1781,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 psrstat, psrperf;
-
-	if (!IS_HASWELL(dev)) {
-		seq_puts(m, "PSR not supported on this platform\n");
-	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
-		seq_puts(m, "PSR enabled\n");
-	} else {
-		seq_puts(m, "PSR disabled: ");
-		switch (dev_priv->no_psr_reason) {
-		case PSR_NO_SOURCE:
-			seq_puts(m, "not supported on this platform");
-			break;
-		case PSR_NO_SINK:
-			seq_puts(m, "not supported by panel");
-			break;
-		case PSR_MODULE_PARAM:
-			seq_puts(m, "disabled by flag");
-			break;
-		case PSR_CRTC_NOT_ACTIVE:
-			seq_puts(m, "crtc not active");
-			break;
-		case PSR_PWR_WELL_ENABLED:
-			seq_puts(m, "power well enabled");
-			break;
-		case PSR_NOT_TILED:
-			seq_puts(m, "not tiled");
-			break;
-		case PSR_SPRITE_ENABLED:
-			seq_puts(m, "sprite enabled");
-			break;
-		case PSR_S3D_ENABLED:
-			seq_puts(m, "stereo 3d enabled");
-			break;
-		case PSR_INTERLACED_ENABLED:
-			seq_puts(m, "interlaced enabled");
-			break;
-		case PSR_HSW_NOT_DDIA:
-			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
-			break;
-		default:
-			seq_puts(m, "unknown reason");
-		}
-		seq_puts(m, "\n");
-		return 0;
-	}
-
-	psrstat = I915_READ(EDP_PSR_STATUS_CTL);
-
-	seq_puts(m, "PSR Current State: ");
-	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
-	case EDP_PSR_STATUS_STATE_IDLE:
-		seq_puts(m, "Reset state\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDONACK:
-		seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDENT:
-		seq_puts(m, "SRD entry\n");
-		break;
-	case EDP_PSR_STATUS_STATE_BUFOFF:
-		seq_puts(m, "Wait for buffer turn off\n");
-		break;
-	case EDP_PSR_STATUS_STATE_BUFON:
-		seq_puts(m, "Wait for buffer turn on\n");
-		break;
-	case EDP_PSR_STATUS_STATE_AUXACK:
-		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDOFFACK:
-		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_puts(m, "Link Status: ");
-	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
-	case EDP_PSR_STATUS_LINK_FULL_OFF:
-		seq_puts(m, "Link is fully off\n");
-		break;
-	case EDP_PSR_STATUS_LINK_FULL_ON:
-		seq_puts(m, "Link is fully on\n");
-		break;
-	case EDP_PSR_STATUS_LINK_STANDBY:
-		seq_puts(m, "Link is in standby\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_printf(m, "PSR Entry Count: %u\n",
-		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
-		   EDP_PSR_STATUS_COUNT_MASK);
-
-	seq_printf(m, "Max Sleep Timer Counter: %u\n",
-		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
-		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
-
-	seq_printf(m, "Had AUX error: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
-
-	seq_printf(m, "Sending AUX: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
-
-	seq_printf(m, "Sending Idle: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
-
-	seq_printf(m, "Sending TP2 TP3: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+	u32 psrperf = 0;
+	bool enabled = false;
 
-	seq_printf(m, "Sending TP1: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
-	seq_printf(m, "Idle Count: %u\n",
-		   psrstat & EDP_PSR_STATUS_IDLE_MASK);
+	enabled = HAS_PSR(dev) &&
+		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+	seq_printf(m, "Enabled: %s\n", yesno(enabled));
 
-	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
-	seq_printf(m, "Performance Counter: %u\n", psrperf);
+	if (HAS_PSR(dev))
+		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+			EDP_PSR_PERF_CNT_MASK;
+	seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
 	return 0;
 }
@@ -1825,152 +1845,965 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int
-i915_wedged_get(void *data, u64 *val)
+struct pipe_crc_info {
+	const char *name;
+	struct drm_device *dev;
+	enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
-	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
-	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
+	spin_lock_irq(&pipe_crc->lock);
+
+	if (pipe_crc->opened) {
+		spin_unlock_irq(&pipe_crc->lock);
+		return -EBUSY; /* already open */
+	}
+
+	pipe_crc->opened = true;
+	filep->private_data = inode->i_private;
+
+	spin_unlock_irq(&pipe_crc->lock);
 
 	return 0;
 }
 
-static int
-i915_wedged_set(void *data, u64 val)
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
-	struct drm_device *dev = data;
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
-	DRM_INFO("Manually setting wedged to %llu\n", val);
-	i915_handle_error(dev, val);
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->opened = false;
+	spin_unlock_irq(&pipe_crc->lock);
 
 	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
-			i915_wedged_get, i915_wedged_set,
-			"%llu\n");
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
+/* account for '\0' */
+#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
 
-static int
-i915_ring_stop_get(void *data, u64 *val)
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
 {
-	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	*val = dev_priv->gpu_error.stop_rings;
-
-	return 0;
+	assert_spin_locked(&pipe_crc->lock);
+	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+			INTEL_PIPE_CRC_ENTRIES_NR);
 }
 
-static int
-i915_ring_stop_set(void *data, u64 val)
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+		   loff_t *pos)
 {
-	struct drm_device *dev = data;
+	struct pipe_crc_info *info = filep->private_data;
+	struct drm_device *dev = info->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+	char buf[PIPE_CRC_BUFFER_LEN];
+	int head, tail, n_entries, n;
+	ssize_t bytes_read;
 
-	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
+	/*
+	 * Don't allow user space to provide buffers not big enough to hold
+	 * a line of data.
+	 */
+	if (count < PIPE_CRC_LINE_LEN)
+		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+		return 0;
 
-	dev_priv->gpu_error.stop_rings = val;
-	mutex_unlock(&dev->struct_mutex);
+	/* nothing to read */
+	spin_lock_irq(&pipe_crc->lock);
+	while (pipe_crc_data_count(pipe_crc) == 0) {
+		int ret;
 
-	return 0;
-}
+		if (filep->f_flags & O_NONBLOCK) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return -EAGAIN;
+		}
 
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
-			i915_ring_stop_get, i915_ring_stop_set,
-			"0x%08llx\n");
+		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+		if (ret) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return ret;
+		}
+	}
 
-#define DROP_UNBOUND 0x1
-#define DROP_BOUND 0x2
-#define DROP_RETIRE 0x4
-#define DROP_ACTIVE 0x8
-#define DROP_ALL (DROP_UNBOUND | \
-		  DROP_BOUND | \
-		  DROP_RETIRE | \
-		  DROP_ACTIVE)
-static int
-i915_drop_caches_get(void *data, u64 *val)
-{
-	*val = DROP_ALL;
+	/* We now have one or more entries to read */
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+			count / PIPE_CRC_LINE_LEN);
+	spin_unlock_irq(&pipe_crc->lock);
 
-	return 0;
-}
+	bytes_read = 0;
+	n = 0;
+	do {
+		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+		int ret;
 
-static int
-i915_drop_caches_set(void *data, u64 val)
-{
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj, *next;
-	struct i915_address_space *vm;
-	struct i915_vma *vma, *x;
-	int ret;
+		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+				       "%8u %8x %8x %8x %8x %8x\n",
+				       entry->frame, entry->crc[0],
+				       entry->crc[1], entry->crc[2],
+				       entry->crc[3], entry->crc[4]);
 
-	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+				   buf, PIPE_CRC_LINE_LEN);
+		if (ret == PIPE_CRC_LINE_LEN)
+			return -EFAULT;
 
-	/* No need to check and wait for gpu resets, only libdrm auto-restarts
-	 * on ioctls on -EAGAIN. */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+		n++;
+	} while (--n_entries);
 
-	if (val & DROP_ACTIVE) {
-		ret = i915_gpu_idle(dev);
-		if (ret)
-			goto unlock;
-	}
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->tail = tail;
+	spin_unlock_irq(&pipe_crc->lock);
 
-	if (val & (DROP_RETIRE | DROP_ACTIVE))
-		i915_gem_retire_requests(dev);
+	return bytes_read;
+}
 
-	if (val & DROP_BOUND) {
-		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-			list_for_each_entry_safe(vma, x, &vm->inactive_list,
-						 mm_list) {
-				if (vma->obj->pin_count)
-					continue;
+static const struct file_operations i915_pipe_crc_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_pipe_crc_open,
+	.read = i915_pipe_crc_read,
+	.release = i915_pipe_crc_release,
+};
 
-				ret = i915_vma_unbind(vma);
-				if (ret)
-					goto unlock;
-			}
-		}
-	}
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+	{
+		.name = "i915_pipe_A_crc",
+		.pipe = PIPE_A,
+	},
+	{
+		.name = "i915_pipe_B_crc",
+		.pipe = PIPE_B,
+	},
+	{
+		.name = "i915_pipe_C_crc",
+		.pipe = PIPE_C,
+	},
+};
 
-	if (val & DROP_UNBOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-					 global_list)
-			if (obj->pages_pin_count == 0) {
-				ret = i915_gem_object_put_pages(obj);
-				if (ret)
-					goto unlock;
-			}
-	}
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+				enum pipe pipe)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
 
-unlock:
-	mutex_unlock(&dev->struct_mutex);
+	info->dev = dev;
+	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+				  &i915_pipe_crc_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
 
-	return ret;
+	return drm_add_fake_info_node(minor, ent, info);
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
-			i915_drop_caches_get, i915_drop_caches_set,
-			"0x%08llx\n");
+static const char * const pipe_crc_sources[] = {
+	"none",
+	"plane1",
+	"plane2",
+	"pf",
+	"pipe",
+	"TV",
+	"DP-B",
+	"DP-C",
+	"DP-D",
+	"auto",
+};
 
-static int
-i915_max_freq_get(void *data, u64 *val)
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 {
-	struct drm_device *dev = data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+	return pipe_crc_sources[source];
+}
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
-		return -ENODEV;
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		seq_printf(m, "%c %s\n", pipe_name(i),
+			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+	return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+				     enum intel_pipe_crc_source *source)
+{
+	struct intel_encoder *encoder;
+	struct intel_crtc *crtc;
+	struct intel_digital_port *dig_port;
+	int ret = 0;
+
+	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!encoder->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(encoder->base.crtc);
+
+		if (crtc->pipe != pipe)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_TVOUT:
+			*source = INTEL_PIPE_CRC_SOURCE_TV;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+		case INTEL_OUTPUT_EDP:
+			dig_port = enc_to_dig_port(&encoder->base);
+			switch (dig_port->port) {
+			case PORT_B:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
+				break;
+			case PORT_C:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
+				break;
+			case PORT_D:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
+				break;
+			default:
+				WARN(1, "nonexisting DP port %c\n",
+				     port_name(dig_port->port));
+				break;
+			}
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		WARN_ON(!IS_G4X(dev));
+
+		tmp |= DC_BALANCE_RESET_VLV;
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+				 enum pipe pipe,
+				 enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_TV:
+		if (!SUPPORTS_TV(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_D:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		WARN_ON(!IS_G4X(dev));
+
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+		tmp &= ~DC_BALANCE_RESET_VLV;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+	}
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PF;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PF:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+			       enum intel_pipe_crc_source source)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	u32 val;
+	int ret;
+
+	if (pipe_crc->source == source)
+		return 0;
+
+	/* forbid changing the source without going back to 'none' */
+	if (pipe_crc->source && source)
+		return -EINVAL;
+
+	if (IS_GEN2(dev))
+		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+	else if (INTEL_INFO(dev)->gen < 5)
+		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_VALLEYVIEW(dev))
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_GEN5(dev) || IS_GEN6(dev))
+		ret = ilk_pipe_crc_ctl_reg(&source, &val);
+	else
+		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+
+	if (ret != 0)
+		return ret;
+
+	/* none -> real source transition */
+	if (source) {
+		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+				 pipe_name(pipe), pipe_crc_source_name(source));
+
+		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+					    INTEL_PIPE_CRC_ENTRIES_NR,
+					    GFP_KERNEL);
+		if (!pipe_crc->entries)
+			return -ENOMEM;
+
+		spin_lock_irq(&pipe_crc->lock);
+		pipe_crc->head = 0;
+		pipe_crc->tail = 0;
+		spin_unlock_irq(&pipe_crc->lock);
+	}
+
+	pipe_crc->source = source;
+
+	I915_WRITE(PIPE_CRC_CTL(pipe), val);
+	POSTING_READ(PIPE_CRC_CTL(pipe));
+
+	/* real source -> none transition */
+	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+		struct intel_pipe_crc_entry *entries;
+
+		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+				 pipe_name(pipe));
+
+		intel_wait_for_vblank(dev, pipe);
+
+		spin_lock_irq(&pipe_crc->lock);
+		entries = pipe_crc->entries;
+		pipe_crc->entries = NULL;
+		spin_unlock_irq(&pipe_crc->lock);
+
+		kfree(entries);
+
+		if (IS_G4X(dev))
+			g4x_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_VALLEYVIEW(dev))
+			vlv_undo_pipe_scramble_reset(dev, pipe);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ *   command: wsp* object wsp+ name wsp+ source wsp*
+ *   object: 'pipe'
+ *   name: (A | B | C)
+ *   source: (none | plane1 | plane2 | pf)
+ *   wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
+ *  "pipe A none"    ->  Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+	int n_words = 0;
+
+	while (*buf) {
+		char *end;
+
+		/* skip leading white space */
+		buf = skip_spaces(buf);
+		if (!*buf)
+			break;	/* end of buffer */
+
+		/* find end of word */
+		for (end = buf; *end && !isspace(*end); end++)
+			;
+
+		if (n_words == max_words) {
+			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+					 max_words);
+			return -EINVAL;	/* ran out of words[] before bytes */
+		}
+
+		if (*end)
+			*end++ = '\0';
+		words[n_words++] = buf;
+		buf = end;
+	}
+
+	return n_words;
+}
+
+enum intel_pipe_crc_object {
+	PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+	"pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+		if (!strcmp(buf, pipe_crc_objects[i])) {
+			*o = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+	const char name = buf[0];
+
+	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+		return -EINVAL;
+
+	*pipe = name - 'A';
+
+	return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+		if (!strcmp(buf, pipe_crc_sources[i])) {
+			*s = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+	int n_words;
+	char *words[N_WORDS];
+	enum pipe pipe;
+	enum intel_pipe_crc_object object;
+	enum intel_pipe_crc_source source;
+
+	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+	if (n_words != N_WORDS) {
+		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+				 N_WORDS);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+		return -EINVAL;
+	}
+
+	return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+				     size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	char *tmpbuf;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	if (len > PAGE_SIZE - 1) {
+		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+				 PAGE_SIZE);
+		return -E2BIG;
+	}
+
+	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(tmpbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	tmpbuf[len] = '\0';
+
+	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+	kfree(tmpbuf);
+	if (ret < 0)
+		return ret;
+
+	*offp += len;
+	return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = display_crc_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = display_crc_ctl_write
+};
+
+static int
+i915_wedged_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	return 0;
+}
+
+static int
+i915_wedged_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+
+	DRM_INFO("Manually setting wedged to %llu\n", val);
+	i915_handle_error(dev, val);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
+			i915_wedged_get, i915_wedged_set,
+			"%llu\n");
+
+static int
+i915_ring_stop_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.stop_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_stop_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.stop_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
+			i915_ring_stop_get, i915_ring_stop_set,
+			"0x%08llx\n");
+
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.missed_irq_rings;
+	return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	dev_priv->gpu_error.missed_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+			"0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.test_irq_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.test_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+			i915_ring_test_irq_get, i915_ring_test_irq_set,
+			"0x%08llx\n");
+
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+		  DROP_BOUND | \
+		  DROP_RETIRE | \
+		  DROP_ACTIVE)
+static int
+i915_drop_caches_get(void *data, u64 *val)
+{
+	*val = DROP_ALL;
+
+	return 0;
+}
+
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	struct i915_address_space *vm;
+	struct i915_vma *vma, *x;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+
+	/* No need to check and wait for gpu resets, only libdrm auto-restarts
+	 * on ioctls on -EAGAIN. */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (val & DROP_ACTIVE) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			goto unlock;
+	}
+
+	if (val & (DROP_RETIRE | DROP_ACTIVE))
+		i915_gem_retire_requests(dev);
+
+	if (val & DROP_BOUND) {
+		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+			list_for_each_entry_safe(vma, x, &vm->inactive_list,
+						 mm_list) {
+				if (vma->obj->pin_count)
+					continue;
+
+				ret = i915_vma_unbind(vma);
+				if (ret)
+					goto unlock;
+			}
+		}
+	}
+
+	if (val & DROP_UNBOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+					 global_list)
+			if (obj->pages_pin_count == 0) {
+				ret = i915_gem_object_put_pages(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
+			i915_drop_caches_get, i915_drop_caches_set,
+			"0x%08llx\n");
+
+static int
+i915_max_freq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
@@ -1996,6 +2829,8 @@ i915_max_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2869,8 @@ i915_min_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -2058,6 +2895,8 @@ i915_min_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2975,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
-		       struct dentry *ent,
-		       const void *key)
-{
-	struct drm_info_node *node;
-
-	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-	if (node == NULL) {
-		debugfs_remove(ent);
-		return -ENOMEM;
-	}
-
-	node->minor = minor;
-	node->dent = ent;
-	node->info_ent = (void *) key;
-
-	mutex_lock(&minor->debugfs_lock);
-	list_add(&node->list, &minor->debugfs_list);
-	mutex_unlock(&minor->debugfs_lock);
-
-	return 0;
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -2227,7 +3040,7 @@ static int i915_debugfs_create(struct dentry *root,
 	return drm_add_fake_info_node(minor, ent, fops);
 }
 
-static struct drm_info_list i915_debugfs_list[] = {
+static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
@@ -2269,7 +3082,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
-static struct i915_debugfs_files {
+static const struct i915_debugfs_files {
 	const char *name;
 	const struct file_operations *fops;
 } i915_debugfs_files[] = {
@@ -2278,11 +3091,28 @@ static struct i915_debugfs_files {
 	{"i915_min_freq", &i915_min_freq_fops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_ring_stop", &i915_ring_stop_fops},
+	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
+	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 };
 
+void intel_display_crc_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+		pipe_crc->opened = false;
+		spin_lock_init(&pipe_crc->lock);
+		init_waitqueue_head(&pipe_crc->wq);
+	}
+}
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
 	int ret, i;
@@ -2291,6 +3121,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 	if (ret)
 		return ret;
 
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		ret = i915_debugfs_create(minor->debugfs_root, minor,
 					  i915_debugfs_files[i].name,
@@ -2310,8 +3146,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
 				 1, minor);
+
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *)&i915_pipe_crc_data[i];
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		struct drm_info_list *info_list =
 			(struct drm_info_list *) i915_debugfs_files[i].fops;
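
(End to end, the new interface is driven entirely through debugfs: write a "pipe <name> <source>" command to i915_display_crc_ctl, then read newline-terminated "frame crc0 crc1 crc2 crc3 crc4" lines from the per-pipe file registered above. A hedged userspace sketch; the paths assume DRM minor 0 and debugfs mounted at /sys/kernel/debug:)

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_ctl(const char *cmd)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, cmd, strlen(cmd));
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	char line[64];	/* >= one 54-byte CRC line, so each read returns one entry */
	ssize_t n;
	int fd, i;

	if (write_ctl("pipe A pipe"))	/* arm CRC capture on pipe A */
		return 1;

	fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);
	if (fd < 0)
		return 1;

	/* each line is "frame crc0 crc1 crc2 crc3 crc4" */
	for (i = 0; i < 10; i++) {
		n = read(fd, line, sizeof(line) - 1);
		if (n <= 0)
			break;
		line[n] = '\0';
		fputs(line, stdout);
	}
	close(fd);

	return write_ctl("pipe A none") ? 1 : 0;	/* disarm */
}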

+ 59 - 59
drivers/gpu/drm/i915/i915_dma.c

@@ -52,7 +52,7 @@
 	intel_ring_emit(LP_RING(dev_priv), x)
 
 #define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
+	__intel_ring_advance(LP_RING(dev_priv))
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 
 	if (batch->num_cliprects) {
 		cliprects = kcalloc(batch->num_cliprects,
-				    sizeof(struct drm_clip_rect),
+				    sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL)
 			return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
-				    sizeof(struct drm_clip_rect), GFP_KERNEL);
+				    sizeof(*cliprects), GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
 			goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = READ_BREADCRUMB(dev_priv);
 		break;
 	case I915_PARAM_CHIPSET_ID:
-		value = dev->pci_device;
+		value = dev->pdev->device;
 		break;
 	case I915_PARAM_HAS_GEM:
 		value = 1;
@@ -1311,13 +1311,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 	if (ret)
 		goto cleanup_gem_stolen;
 		goto cleanup_gem_stolen;
 
 
+	intel_power_domains_init_hw(dev);
+
 	/* Important: The output setup functions called by modeset_init need
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 	intel_modeset_init(dev);
 
 
 	ret = i915_gem_init(dev);
 	ret = i915_gem_init(dev);
 	if (ret)
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_power;
 
 
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
 
@@ -1325,9 +1327,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 
 	/* Always safe in the mode setting case. */
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
-	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0)
+	dev->vblank_disable_allowed = true;
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		intel_display_power_put(dev, POWER_DOMAIN_VGA);
 		return 0;
 		return 0;
+	}
 
 
 	ret = intel_fbdev_init(dev);
 	ret = intel_fbdev_init(dev);
 	if (ret)
 	if (ret)
@@ -1362,7 +1366,8 @@ cleanup_gem:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_irq:
+cleanup_power:
+	intel_display_power_put(dev, POWER_DOMAIN_VGA);
 	drm_irq_uninstall(dev);
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
 	i915_gem_cleanup_stolen(dev);
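
The cleanup_irq -> cleanup_power relabelling above keeps i915_load_modeset_init() on the standard kernel goto-unwind shape: each failure jumps to a label that undoes exactly the steps that already succeeded, in reverse order. A generic sketch of the idiom, with hypothetical stubbed-out steps rather than i915 functions:

static int step_a_init(void) { return 0; }	/* stubs for illustration */
static void step_a_fini(void) { }
static int step_b_init(void) { return 0; }
static void step_b_fini(void) { }
static int step_c_init(void) { return 0; }

static int example_init(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = step_b_init();
	if (ret)
		goto cleanup_a;

	ret = step_c_init();
	if (ret)
		goto cleanup_b;

	return 0;

cleanup_b:				/* unwind in reverse order */
	step_b_fini();
cleanup_a:
	step_a_fini();
	return ret;
}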
@@ -1398,6 +1403,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 	master->driver_priv = NULL;
 }

+#ifdef CONFIG_DRM_I915_FBDEV
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1418,6 +1424,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)

 	kfree(ap);
 }
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif

 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
@@ -1459,17 +1470,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	info = (struct intel_device_info *) flags;

 	/* Refuse to load on gen6+ without kms enabled. */
-	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
 		return -ENODEV;
+	}

-	/* i915 has 4 more counters */
-	dev->counters += 4;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-	dev->types[9] = _DRM_STAT_DMA;
-
-	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;

@@ -1494,6 +1501,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
 	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);

+	intel_display_crc_init(dev);
+
 	i915_dump_device_info(dev_priv);

 	/* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1540,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	intel_uncore_early_sanitize(dev);

-	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
-		/* The docs do not explain exactly how the calculation can be
-		 * made. It is somewhat guessable, but for now, it's always
-		 * 128MB.
-		 * NB: We can't write IDICR yet because we do not have gt funcs
-		 * set up */
-		dev_priv->ellc_size = 128;
-		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
-	}
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_uncore_init(dev);

 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto put_bridge;
+		goto out_regs;

 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1576,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 				     aperture_size);
 	if (dev_priv->gtt.mappable == NULL) {
 		ret = -EIO;
-		goto out_rmmap;
+		goto out_gtt;
 	}

 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_mtrrfree;
 	}

-	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(dev);
-
 	intel_irq_init(dev);
 	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
-	intel_uncore_init(dev);

 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -1640,13 +1640,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}

 	if (HAS_POWER_WELL(dev))
-		i915_init_power_well(dev);
+		intel_power_domains_init(dev);

 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_gem_unload;
+			goto out_power_well;
 		}
 	} else {
 		/* Start out suspended in ums mode. */
@@ -1666,6 +1666,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	return 0;

+out_power_well:
+	if (HAS_POWER_WELL(dev))
+		intel_power_domains_remove(dev);
+	drm_vblank_cleanup(dev);
 out_gem_unload:
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1683,18 @@ out_gem_unload:
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+	list_del(&dev_priv->gtt.base.global_link);
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
-out_rmmap:
+out_regs:
+	intel_uncore_fini(dev);
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
+	if (dev_priv->slab)
+		kmem_cache_destroy(dev_priv->slab);
 	kfree(dev_priv);
 	return ret;
 }
@@ -1700,8 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
 		/* The i915.ko module is still not prepared to be loaded when
 		 * the power well is not enabled, so just enable it in case
 		 * we're going to unload/reload. */
-		intel_set_power_well(dev, true);
-		i915_remove_power_well(dev);
+		intel_display_set_init_power(dev, true);
+		intel_power_domains_remove(dev);
 	}

 	i915_teardown_sysfs(dev);
@@ -1709,15 +1719,9 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	i915_gem_retire_requests(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	/* Cancel the retire work handler, which should be idle now. */
-	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1778,8 @@ int i915_driver_unload(struct drm_device *dev)
 	list_del(&dev_priv->gtt.base.global_link);
 	WARN_ON(!list_empty(&dev_priv->vm_list));
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-	if (dev_priv->regs != NULL)
-		pci_iounmap(dev->pdev, dev_priv->regs);
+
+	drm_vblank_cleanup(dev);

 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
@@ -1785,6 +1789,10 @@ int i915_driver_unload(struct drm_device *dev)

 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

+	intel_uncore_fini(dev);
+	if (dev_priv->regs != NULL)
+		pci_iounmap(dev->pdev, dev_priv->regs);
+
 	if (dev_priv->slab)
 		kmem_cache_destroy(dev_priv->slab);

@@ -1796,19 +1804,11 @@ int i915_driver_unload(struct drm_device *dev)

 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *file_priv;
-
-	DRM_DEBUG_DRIVER("\n");
-	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
-	if (!file_priv)
-		return -ENOMEM;
-
-	file->driver_priv = file_priv;
-
-	spin_lock_init(&file_priv->mm.lock);
-	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	int ret;

-	idr_init(&file_priv->context_idr);
+	ret = i915_gem_open(dev, file);
+	if (ret)
+		return ret;

 	return 0;
 }
@@ -1836,7 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
 		return;

 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_fb_restore_mode(dev);
+		intel_fbdev_restore_mode(dev);
 		vga_switcheroo_process_delayed_switch();
 		return;
 	}

+ 102 - 85
drivers/gpu/drm/i915/i915_drv.c

@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_i85x_info = {
 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_i965g_info = {
 	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
 	.has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
 	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_g33_info = {
 	.gen = 3, .is_g33 = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };

 static const struct intel_device_info intel_g45_info = {
 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };

 static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };

 static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
 static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };

 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };

 static const struct intel_device_info intel_sandybridge_d_info = {
 	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
-	.has_force_wake = 1,
 };

 static const struct intel_device_info intel_sandybridge_m_info = {
 	.gen = 6, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
-	.has_force_wake = 1,
 };

 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
 	.need_gfx_hws = 1, .has_hotplug = 1, \
-	.has_bsd_ring = 1, \
-	.has_blt_ring = 1, \
-	.has_llc = 1, \
-	.has_force_wake = 1
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1

 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
 	.is_haswell = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
-	.has_vebox_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };

 static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,25 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
-	.has_vebox_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+};
+
+static const struct intel_device_info intel_broadwell_d_info = {
+	.is_preliminary = 1,
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+	.is_preliminary = 1,
+	.gen = 8, .is_mobile = 1, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.has_llc = 1,
+	.has_ddi = 1,
 };

 /*
@@ -362,7 +385,9 @@ static const struct intel_device_info intel_haswell_m_info = {
 	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
 	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
 	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
-	INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
+	INTEL_BDW_M_IDS(&intel_broadwell_m_info),	\
+	INTEL_BDW_D_IDS(&intel_broadwell_d_info)

 static const struct pci_device_id pciidlist[] = {		/* aka */
 	INTEL_PCI_IDS,
@@ -416,13 +441,19 @@ void intel_detect_pch(struct drm_device *dev)
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
 				WARN_ON(IS_ULT(dev));
+			} else if (IS_BROADWELL(dev)) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->pch_id =
+					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+				DRM_DEBUG_KMS("This is Broadwell, assuming "
+					      "LynxPoint LP PCH\n");
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
@@ -447,6 +478,12 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;

+	/* Until we get further testing... */
+	if (IS_GEN8(dev)) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 0;
+	}
+
 	if (i915_semaphores >= 0)
 		return i915_semaphores;

@@ -472,7 +509,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 	/* We do a lot of poking in a lot of registers, make sure they work
 	 * properly. */
 	hsw_disable_package_c8(dev_priv);
-	intel_set_power_well(dev, true);
+	intel_display_set_init_power(dev, true);

 	drm_kms_helper_poll_disable(dev);

@@ -482,9 +519,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		int error;

-		mutex_lock(&dev->struct_mutex);
-		error = i915_gem_idle(dev);
-		mutex_unlock(&dev->struct_mutex);
+		error = i915_gem_suspend(dev);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -578,11 +613,24 @@ static void intel_resume_hotplug(struct drm_device *dev)
 	drm_helper_hpd_irq_event(dev);
 }

-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;

+	intel_uncore_early_sanitize(dev);
+
+	intel_uncore_sanitize(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    restore_gtt_mappings) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	intel_power_domains_init_hw(dev);
+
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);

@@ -642,20 +690,10 @@ static int __i915_drm_thaw(struct drm_device *dev)

 static int i915_drm_thaw(struct drm_device *dev)
 {
-	int error = 0;
-
-	intel_uncore_sanitize(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	} else if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_check_and_clear_faults(dev);

-	__i915_drm_thaw(dev);
-
-	return error;
+	return __i915_drm_thaw(dev, true);
 }

 int i915_resume(struct drm_device *dev)
@@ -671,20 +709,12 @@ int i915_resume(struct drm_device *dev)

 	pci_set_master(dev->pdev);

-	intel_uncore_sanitize(dev);
-
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need this since the BIOS might clear all our scratch PTEs.
+	 * earlier) need to restore the GTT mappings since the BIOS might clear
+	 * all our scratch PTEs.
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-	    !dev_priv->opregion.header) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	ret = __i915_drm_thaw(dev);
+	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
 	if (ret)
 		return ret;

@@ -722,24 +752,19 @@ int i915_reset(struct drm_device *dev)

 	simulated = dev_priv->gpu_error.stop_rings != 0;

-	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
-		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-		ret = -ENODEV;
-	} else {
-		ret = intel_gpu_reset(dev);
-
-		/* Also reset the gpu hangman. */
-		if (simulated) {
-			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-			dev_priv->gpu_error.stop_rings = 0;
-			if (ret == -ENODEV) {
-				DRM_ERROR("Reset not implemented, but ignoring "
-					  "error for simulated gpu hangs\n");
-				ret = 0;
-			}
-		} else
-			dev_priv->gpu_error.last_reset = get_seconds();
+	ret = intel_gpu_reset(dev);
+
+	/* Also reset the gpu hangman. */
+	if (simulated) {
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->gpu_error.stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
 	}
+
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -762,30 +787,17 @@ int i915_reset(struct drm_device *dev)
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->ums.mm_suspended) {
-		struct intel_ring_buffer *ring;
-		int i;
-
+		bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
 		dev_priv->ums.mm_suspended = 0;

-		i915_gem_init_swizzling(dev);
-
-		for_each_ring(ring, dev_priv, i)
-			ring->init(ring);
-
-		i915_gem_context_init(dev);
-		if (dev_priv->mm.aliasing_ppgtt) {
-			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-			if (ret)
-				i915_gem_cleanup_aliasing_ppgtt(dev);
-		}
-
-		/*
-		 * It would make sense to re-init all the other hw state, at
-		 * least the rps/rc6/emon init done within modeset_init_hw. For
-		 * some unknown reason, this blows up my ilk, so don't.
-		 */
-
+		ret = i915_gem_init_hw(dev);
+		if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
+			DRM_ERROR("HW contexts didn't survive reset\n");
 		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			DRM_ERROR("Failed hw init on reset %d\n", ret);
+			return ret;
+		}

 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
@@ -802,6 +814,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;

+	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+		DRM_INFO("This hardware requires preliminary hardware support.\n"
+			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+		return -ENODEV;
+	}
+
 	/* Only bind to function 0 of the device. Early generations
 	 * used function 1 as a placeholder for multi-head. This causes
 	 * us confusion instead, especially on the systems where both
@@ -949,7 +967,6 @@ static struct drm_driver driver = {
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
 #endif
-	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,


+ 296 - 141
drivers/gpu/drm/i915/i915_drv.h

@@ -54,6 +54,7 @@
 #define DRIVER_DATE		"20080730"

 enum pipe {
+	INVALID_PIPE = -1,
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
@@ -98,13 +99,29 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_TRANSCODER_A,
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
-	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_INIT,
+
+	POWER_DOMAIN_NUM,
 };

+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
 		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+	 (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP))
+#define BDW_ALWAYS_ON_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP) |	\
+	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))

 enum hpd_pin {
 	HPD_NONE = 0,
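
The TRANSCODER_EDP special case in POWER_DOMAIN_TRANSCODER() above exists because the eDP transcoder's raw value is not contiguous with A/B/C — the removed "+ 0xF" offset hints at it — so the enum now gets an explicit slot instead of padding. A self-contained illustration of the mapping, with values local to this example:

#include <assert.h>

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,	/* deliberately non-contiguous */
};

enum power_domain {
	POWER_DOMAIN_TRANSCODER_A = 0,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

int main(void)
{
	/* A/B/C map by offset; eDP takes the dedicated slot */
	assert(POWER_DOMAIN_TRANSCODER(TRANSCODER_B) == POWER_DOMAIN_TRANSCODER_B);
	assert(POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP);
	return 0;
}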
@@ -225,9 +242,12 @@ struct intel_opregion {
 	struct opregion_header __iomem *header;
 	struct opregion_acpi __iomem *acpi;
 	struct opregion_swsci __iomem *swsci;
+	u32 swsci_gbda_sub_functions;
+	u32 swsci_sbcb_sub_functions;
 	struct opregion_asle __iomem *asle;
 	void __iomem *vbt;
 	u32 __iomem *lid_state;
+	struct work_struct asle_work;
 };
 #define OPREGION_SIZE            (8*1024)

@@ -285,6 +305,7 @@ struct drm_i915_error_state {
 	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
+	u32 bbstate[I915_NUM_RINGS];
 	u32 instpm[I915_NUM_RINGS];
 	u32 instps[I915_NUM_RINGS];
 	u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +342,13 @@ struct drm_i915_error_state {
 		u32 dirty:1;
 		u32 purgeable:1;
 		s32 ring:4;
-		u32 cache_level:2;
+		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
 	u32 *active_bo_count, *pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	int hangcheck_score[I915_NUM_RINGS];
+	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };

 struct intel_crtc_config;
@@ -357,7 +380,7 @@ struct drm_i915_display_funcs {
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
-	void (*update_wm)(struct drm_device *dev);
+	void (*update_wm)(struct drm_crtc *crtc);
 	void (*update_sprite_wm)(struct drm_plane *plane,
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, int pixel_size,
@@ -367,7 +390,6 @@ struct drm_i915_display_funcs {
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
-	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -375,7 +397,8 @@ struct drm_i915_display_funcs {
 	void (*crtc_disable)(struct drm_crtc *crtc);
 	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
-			  struct drm_crtc *crtc);
+			  struct drm_crtc *crtc,
+			  struct drm_display_mode *mode);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +418,20 @@ struct drm_i915_display_funcs {
 struct intel_uncore_funcs {
 	void (*force_wake_get)(struct drm_i915_private *dev_priv);
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+
+	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+				uint8_t val, bool trace);
+	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+				uint16_t val, bool trace);
+	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+				uint32_t val, bool trace);
+	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+				uint64_t val, bool trace);
 };

 struct intel_uncore {
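
The mmio_read*/mmio_write* members added above turn register access into a per-generation vtable: the right accessors are picked once at init, so the hot path carries no generation checks. A rough userspace sketch of that design (everything here is illustrative, not the i915 implementation):

#include <stdio.h>

struct uncore_funcs {
	unsigned int (*mmio_readl)(unsigned int offset);
	void (*mmio_writel)(unsigned int offset, unsigned int val);
};

static unsigned int gen6_readl(unsigned int offset)
{
	/* a real implementation would take a forcewake reference here */
	printf("gen6 read  0x%05x\n", offset);
	return 0;
}

static void gen6_writel(unsigned int offset, unsigned int val)
{
	printf("gen6 write 0x%05x = 0x%08x\n", offset, val);
}

static const struct uncore_funcs gen6_funcs = {
	.mmio_readl = gen6_readl,
	.mmio_writel = gen6_writel,
};

int main(void)
{
	const struct uncore_funcs *funcs = &gen6_funcs;	/* chosen at init */

	funcs->mmio_writel(0x2030, 0x1);
	(void)funcs->mmio_readl(0x2030);
	return 0;
}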
@@ -404,6 +441,8 @@ struct intel_uncore {

 	unsigned fifo_count;
 	unsigned forcewake_count;
+
+	struct delayed_work force_wake_work;
 };

 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +459,7 @@ struct intel_uncore {
 	func(is_ivybridge) sep \
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
-	func(has_force_wake) sep \
+	func(is_preliminary) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \
 	func(has_hotplug) sep \
@@ -428,9 +467,6 @@ struct intel_uncore {
 	func(has_overlay) sep \
 	func(overlay_needs_physical) sep \
 	func(supports_tv) sep \
-	func(has_bsd_ring) sep \
-	func(has_blt_ring) sep \
-	func(has_vebox_ring) sep \
 	func(has_llc) sep \
 	func(has_ddi) sep \
 	func(has_fpga_dbg)
@@ -442,6 +478,7 @@ struct intel_device_info {
 	u32 display_mmio_offset;
 	u8 num_pipes:3;
 	u8 gen;
+	u8 ring_mask; /* Rings supported by the HW */
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 };

@@ -542,10 +579,21 @@ struct i915_gtt {
 struct i915_hw_ppgtt {
 	struct i915_address_space base;
 	unsigned num_pd_entries;
-	struct page **pt_pages;
-	uint32_t pd_offset;
-	dma_addr_t *pt_dma_addr;
-
+	union {
+		struct page **pt_pages;
+		struct page *gen8_pt_pages;
+	};
+	struct page *pd_pages;
+	int num_pd_pages;
+	int num_pt_pages;
+	union {
+		uint32_t pd_offset;
+		dma_addr_t pd_dma_addr[4];
+	};
+	union {
+		dma_addr_t *pt_dma_addr;
+		dma_addr_t *gen8_pt_dma_addr[4];
+	};
 	int (*enable)(struct drm_device *dev);
 };

@@ -570,6 +618,13 @@ struct i915_vma {
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;

+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
 };

 struct i915_ctx_hang_stats {
@@ -578,6 +633,12 @@ struct i915_ctx_hang_stats {

 	/* This context had batch active when hang was declared */
 	unsigned batch_active;
+
+	/* Time when this context was last blamed for a GPU reset */
+	unsigned long guilty_ts;
+
+	/* This context is banned to submit more work */
+	bool banned;
 };

 /* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -586,10 +647,13 @@ struct i915_hw_context {
 	struct kref ref;
 	int id;
 	bool is_initialized;
+	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
 	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+
+	struct list_head link;
 };

 struct i915_fbc {
@@ -623,17 +687,9 @@ struct i915_fbc {
 	} no_fbc_reason;
 };

-enum no_psr_reason {
-	PSR_NO_SOURCE, /* Not supported on platform */
-	PSR_NO_SINK, /* Not supported by panel */
-	PSR_MODULE_PARAM,
-	PSR_CRTC_NOT_ACTIVE,
-	PSR_PWR_WELL_ENABLED,
-	PSR_NOT_TILED,
-	PSR_SPRITE_ENABLED,
-	PSR_S3D_ENABLED,
-	PSR_INTERLACED_ENABLED,
-	PSR_HSW_NOT_DDIA,
+struct i915_psr {
+	bool sink_support;
+	bool source_ok;
 };

 enum intel_pch {
@@ -704,6 +760,9 @@ struct i915_suspend_saved_registers {
 	u32 saveBLC_HIST_CTL;
 	u32 saveBLC_PWM_CTL;
 	u32 saveBLC_PWM_CTL2;
+	u32 saveBLC_HIST_CTL_B;
+	u32 saveBLC_PWM_CTL_B;
+	u32 saveBLC_PWM_CTL2_B;
 	u32 saveBLC_CPU_PWM_CTL;
 	u32 saveBLC_CPU_PWM_CTL2;
 	u32 saveFPB0;
@@ -823,17 +882,20 @@ struct intel_gen6_power_mgmt {
 	struct work_struct work;
 	u32 pm_iir;

-	/* On vlv we need to manually drop to Vmin with a delayed work. */
-	struct delayed_work vlv_work;
-
 	/* The below variables an all the rps hw state are protected by
 	 * dev->struct mutext. */
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
 	u8 rpe_delay;
+	u8 rp1_delay;
+	u8 rp0_delay;
 	u8 hw_max;

+	int last_adj;
+	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+	bool enabled;
 	struct delayed_work delayed_resume_work;

 	/*
@@ -870,11 +932,21 @@ struct intel_ilk_power_mgmt {

 /* Power well structure for haswell */
 struct i915_power_well {
-	struct drm_device *device;
-	spinlock_t lock;
 	/* power well enable/disable usage count */
 	int count;
-	int i915_request;
+};
+
+#define I915_MAX_POWER_WELLS 1
+
+struct i915_power_domains {
+	/*
+	 * Power wells needed for initialization at driver init and suspend
+	 * time are on. They are kept on until after the first modeset.
+	 */
+	bool init_power_on;
+
+	struct mutex lock;
+	struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
 };

 struct i915_dri1_state {
@@ -902,9 +974,11 @@ struct i915_ums_state {
 	int mm_suspended;
 };

+#define MAX_L3_SLICES 2
 struct intel_l3_parity {
-	u32 *remap_info;
+	u32 *remap_info[MAX_L3_SLICES];
 	struct work_struct error_work;
+	int which_slice;
 };

 struct i915_gem_mm {
@@ -941,6 +1015,15 @@ struct i915_gem_mm {
 	 */
 	struct delayed_work retire_work;

+	/**
+	 * When we detect an idle GPU, we want to turn on
+	 * powersaving features. So once we see that there
+	 * are no more requests outstanding and no more
+	 * arrive within a small period of time, we fire
+	 * off the idle_work.
+	 */
+	struct delayed_work idle_work;
+
 	/**
 	 * Are we in a non-interruptible section of code like
 	 * modesetting?
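
The idle_work comment above describes a common delayed-work pattern: arm a timer when the last request retires, cancel it if new work shows up first. A kernel-style sketch under those assumptions (names are hypothetical, not i915's):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_mm {
	struct delayed_work idle_work;
};

static void example_idle_worker(struct work_struct *work)
{
	/* nothing arrived during the grace period: enable powersaving */
}

static void example_mm_init(struct example_mm *mm)
{
	INIT_DELAYED_WORK(&mm->idle_work, example_idle_worker);
}

static void example_last_request_retired(struct example_mm *mm)
{
	/* GPU just went idle; fire the worker after a short grace period */
	schedule_delayed_work(&mm->idle_work, msecs_to_jiffies(100));
}

static void example_new_request(struct example_mm *mm)
{
	/* fresh work arrived: the GPU is not idle after all */
	cancel_delayed_work(&mm->idle_work);
}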
@@ -979,6 +1062,9 @@ struct i915_gpu_error {
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	/* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
 	struct timer_list hangcheck_timer;

 	/* For reset and error_state handling. */
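
Worked out, the ban window defined above is:

/*
 * DRM_I915_CTX_BAN_PERIOD = DIV_ROUND_UP(8 * DRM_I915_HANGCHECK_PERIOD, 1000)
 *                         = DIV_ROUND_UP(8 * 1500, 1000)
 *                         = DIV_ROUND_UP(12000, 1000)
 *                         = 12	(seconds)
 */

so a context blamed for a second hang within 12 seconds of its last one gets banned, which is what the guilty_ts and banned fields added to i915_ctx_hang_stats earlier in this file track.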
@@ -987,7 +1073,8 @@ struct i915_gpu_error {
 	struct drm_i915_error_state *first_error;
 	struct work_struct work;

-	unsigned long last_reset;
+
+	unsigned long missed_irq_rings;

 	/**
 	 * State variable and reset counter controlling the reset flow
@@ -1027,6 +1114,9 @@ struct i915_gpu_error {

 	/* For gpu hang simulation. */
 	unsigned int stop_rings;
+
+	/* For missed irq/seqno simulation. */
+	unsigned int test_irq_rings;
 };

 enum modeset_restore {
@@ -1035,6 +1125,14 @@ enum modeset_restore {
 	MODESET_SUSPENDED,
 };

+struct ddi_vbt_port_info {
+	uint8_t hdmi_level_shift;
+
+	uint8_t supports_dvi:1;
+	uint8_t supports_hdmi:1;
+	uint8_t supports_dp:1;
+};
+
 struct intel_vbt_data {
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1060,10 +1158,17 @@ struct intel_vbt_data {
 	int edp_bpp;
 	struct edp_power_seq edp_pps;

+	/* MIPI DSI */
+	struct {
+		u16 panel_id;
+	} dsi;
+
 	int crt_ddc_pin;

 	int child_dev_num;
-	struct child_device_config *child_dev;
+	union child_device_config *child_dev;
+
+	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 };

 enum intel_ddb_partitioning {
@@ -1079,6 +1184,15 @@ struct intel_wm_level {
 	uint32_t fbc_val;
 };

+struct hsw_wm_values {
+	uint32_t wm_pipe[3];
+	uint32_t wm_lp[3];
+	uint32_t wm_lp_spr[3];
+	uint32_t wm_linetime[3];
+	bool enable_fbc_wm;
+	enum intel_ddb_partitioning partitioning;
+};
+
 /*
  * This struct tracks the state needed for the Package C8+ feature.
  *
@@ -1148,6 +1262,36 @@ struct i915_package_c8 {
 	} regsave;
 };

+enum intel_pipe_crc_source {
+	INTEL_PIPE_CRC_SOURCE_NONE,
+	INTEL_PIPE_CRC_SOURCE_PLANE1,
+	INTEL_PIPE_CRC_SOURCE_PLANE2,
+	INTEL_PIPE_CRC_SOURCE_PF,
+	INTEL_PIPE_CRC_SOURCE_PIPE,
+	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
+	INTEL_PIPE_CRC_SOURCE_TV,
+	INTEL_PIPE_CRC_SOURCE_DP_B,
+	INTEL_PIPE_CRC_SOURCE_DP_C,
+	INTEL_PIPE_CRC_SOURCE_DP_D,
+	INTEL_PIPE_CRC_SOURCE_AUTO,
+	INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+	uint32_t frame;
+	uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR	128
+struct intel_pipe_crc {
+	spinlock_t lock;
+	bool opened;		/* exclusive access to the result file */
+	struct intel_pipe_crc_entry *entries;
+	enum intel_pipe_crc_source source;
+	int head, tail;
+	wait_queue_head_t wq;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
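
The head/tail pair in intel_pipe_crc above implies a classic fixed-size ring buffer: the interrupt handler produces entries at head, the debugfs reader consumes at tail, and the power-of-two size lets both wrap with a mask. A userspace sketch of that structure (locking and the wait queue omitted; names local to this example):

#include <stdbool.h>
#include <stdint.h>

#define ENTRIES_NR 128	/* must stay a power of two for the masking */

struct crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

struct crc_ring {
	struct crc_entry entries[ENTRIES_NR];
	int head, tail;
};

static bool crc_ring_push(struct crc_ring *ring, const struct crc_entry *e)
{
	int next = (ring->head + 1) & (ENTRIES_NR - 1);

	if (next == ring->tail)
		return false;	/* full: producer overran the reader */
	ring->entries[ring->head] = *e;
	ring->head = next;
	return true;
}

static bool crc_ring_pop(struct crc_ring *ring, struct crc_entry *e)
{
	if (ring->head == ring->tail)
		return false;	/* empty */
	*e = ring->entries[ring->tail];
	ring->tail = (ring->tail + 1) & (ENTRIES_NR - 1);
	return true;
}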
@@ -1193,7 +1337,10 @@ typedef struct drm_i915_private {
 	struct mutex dpio_lock;

 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask;
+	union {
+		u32 irq_mask;
+		u32 de_irq_mask[I915_MAX_PIPES];
+	};
 	u32 gt_irq_mask;
 	u32 pm_irq_mask;

@@ -1272,6 +1419,10 @@ typedef struct drm_i915_private {
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;

+#ifdef CONFIG_DEBUG_FS
+	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;
@@ -1297,17 +1448,18 @@ typedef struct drm_i915_private {
 	 * mchdev_lock in intel_pm.c */
 	struct intel_ilk_power_mgmt ips;

-	/* Haswell power well */
-	struct i915_power_well power_well;
+	struct i915_power_domains power_domains;

-	enum no_psr_reason no_psr_reason;
+	struct i915_psr psr;

 	struct i915_gpu_error gpu_error;

 	struct drm_i915_gem_object *vlv_pctx;

+#ifdef CONFIG_DRM_I915_FBDEV
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
+#endif

 	/*
 	 * The console may be contended at resume, but we don't
@@ -1320,6 +1472,7 @@ typedef struct drm_i915_private {

 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+	struct list_head context_list;

 	u32 fdi_rx_config;

@@ -1337,6 +1490,9 @@ typedef struct drm_i915_private {
 		uint16_t spr_latency[5];
 		/* cursor */
 		uint16_t cur_latency[5];
+
+		/* current hardware state */
+		struct hsw_wm_values hw;
 	} wm;

 	struct i915_package_c8 pc8;
@@ -1400,8 +1556,6 @@ struct drm_i915_gem_object {
 	struct list_head ring_list;
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
-	/** This object's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;

 	/**
 	 * This is set if the object is on the active lists (has pending
@@ -1487,13 +1641,6 @@ struct drm_i915_gem_object {
 	void *dma_buf_vmapping;
 	int vmapping_count;

-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
 	struct intel_ring_buffer *ring;

 	/** Breadcrumb of last rendering to the buffer. */
@@ -1505,11 +1652,14 @@ struct drm_i915_gem_object {
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;

+	/** References from framebuffers, locks out tiling changes. */
+	unsigned long framebuffer_references;
+
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;

 	/** User space pin count and filp owning the pin */
-	uint32_t user_pin_count;
+	unsigned long user_pin_count;
 	struct drm_file *pin_filp;

 	/** for phy allocated objects */
@@ -1560,48 +1710,56 @@ struct drm_i915_gem_request {
 };

 struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+
 	struct {
 		spinlock_t lock;
 		struct list_head request_list;
+		struct delayed_work idle_work;
 	} mm;
 	struct idr context_idr;

 	struct i915_ctx_hang_stats hang_stats;
+	atomic_t rps_wait_boost;
 };

 #define INTEL_INFO(dev)	(to_i915(dev)->info)

-#define IS_I830(dev)		((dev)->pci_device == 0x3577)
-#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
+#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
 #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
-				 (dev)->pci_device == 0x0152 ||	\
-				 (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
-				 (dev)->pci_device == 0x0106 ||	\
-				 (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
+				 (dev)->pdev->device == 0x0152 || \
+				 (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
+				 (dev)->pdev->device == 0x0106 || \
+				 (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
+#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0C00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)		(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0A00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

 /*
  * The genX designation typically refers to the render engine, so render
@@ -1615,10 +1773,15 @@ struct drm_i915_file_private {
 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
-
-#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
+#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
+
+#define RENDER_RING		(1<<RCS)
+#define BSD_RING		(1<<VCS)
+#define BLT_RING		(1<<BCS)
+#define VEBOX_RING		(1<<VECS)
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev)            (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
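
The HAS_BSD/HAS_BLT/HAS_VEBOX checks above now test a single ring_mask bitfield instead of one boolean per ring. A minimal standalone sketch of the idea (the enum values and field name mirror the diff; everything else is illustrative):

/* Ring availability as a bitmask: each HAS_*() check becomes a bit test. */
enum ring_id { RCS = 0, VCS = 1, BCS = 2, VECS = 3 };

struct device_info_sketch {		/* stand-in for intel_device_info */
	unsigned int ring_mask;		/* e.g. (1 << RCS) | (1 << VCS) */
};

static inline int has_bsd(const struct device_info_sketch *info)
{
	return info->ring_mask & (1 << VCS);	/* same test as HAS_BSD() */
}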
@@ -1640,7 +1803,6 @@ struct drm_i915_file_private {
 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)

@@ -1648,11 +1810,12 @@ struct drm_i915_file_private {
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

-#define HAS_IPS(dev)		(IS_ULT(dev))
+#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))

 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
-#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))

 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1668,35 +1831,14 @@ struct drm_i915_file_private {
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

-#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

 #define GT_FREQUENCY_MULTIPLIER 50

 #include "i915_trace.h"

-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage.  This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE			(1<<0)
-#define INTEL_RC6p_ENABLE			(1<<1)
-#define INTEL_RC6pp_ENABLE			(1<<2)
-
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1767,12 +1909,13 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);

 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1824,14 +1967,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);

 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1870,9 +2010,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
@@ -1913,7 +2052,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 	}
 }

-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
@@ -1933,11 +2072,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
@@ -1964,6 +2103,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);

 uint32_t
@@ -1995,6 +2135,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2031,7 +2174,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
 }
-#undef obj_to_ggtt

 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
@@ -2094,6 +2236,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);

 /* i915_gem_stolen.c */
@@ -2133,6 +2276,11 @@ int i915_verify_lists(struct drm_device *dev);
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif

 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2186,15 +2334,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);

 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif

 /* intel_acpi.c */
@@ -2256,8 +2419,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2266,37 +2437,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);

-#define __i915_read(x) \
-	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
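
With the old i915_read##x/i915_write##x declarations gone, register access is dispatched through per-platform function pointers and the macros above expand to indirect calls. A rough sketch of the shape (the field and hook names follow the diff; the struct is abbreviated):

/* Per-platform MMIO hooks; a Gen-specific init fills these in, e.g. with
 * variants that take forcewake before touching the register. */
struct uncore_funcs_sketch {
	u32  (*mmio_readl)(struct drm_i915_private *dev_priv, u32 reg, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, u32 reg, u32 val, bool trace);
};

/* I915_READ(reg) then boils down to:
 *	dev_priv->uncore.funcs.mmio_readl(dev_priv, reg, true);
 */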

+ 340 - 218
drivers/gpu/drm/i915/i915_gem.c

@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
 static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
@@ -61,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
		     struct drm_mode_create_dumb *args)
 {
	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
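
DIV_ROUND_UP(args->bpp, 8) is the same bytes-per-pixel rounding as the old hand-written (bpp + 7) / 8; the pitch is then padded to a 64-byte multiple. Worked through with illustrative values:

/* width = 1366, bpp = 32:
 *   DIV_ROUND_UP(32, 8)  = 4 bytes per pixel
 *   1366 * 4             = 5464 bytes of pixels per row
 *   ALIGN(5464, 64)      = 5504  (the returned pitch)
 *   size                 = 5504 * height
 */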
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
-	if (seqno == ring->outstanding_lazy_request)
+	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
 }

+static void fake_irq(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+		       struct intel_ring_buffer *ring)
+{
+	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+	if (file_priv == NULL)
+		return true;
+
+	return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
-			bool interruptible, struct timespec *timeout)
+			bool interruptible,
+			struct timespec *timeout,
+			struct drm_i915_file_private *file_priv)
 {
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
+	struct timespec before, now;
+	DEFINE_WAIT(wait);
+	long timeout_jiffies;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

-	trace_i915_gem_request_wait_begin(ring, seqno);
+	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;

-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
+	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+		gen6_rps_boost(dev_priv);
+		if (file_priv)
+			mod_delayed_work(dev_priv->wq,
+					 &file_priv->mm.idle_work,
+					 msecs_to_jiffies(100));
	}

-	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
+	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+	    WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

-	/* Record current time in case interrupted by signal, or wedged * */
+	/* Record current time in case interrupted by signal, or wedged */
+	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
+	for (;;) {
+		struct timer_list timer;
+		unsigned long expire;

-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 i915_reset_in_progress(&dev_priv->gpu_error) || \
-	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-						 timeout_jiffies);
+		prepare_to_wait(&ring->irq_queue, &wait,
+				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
-		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-			end = -EAGAIN;
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truely gone. */
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			if (ret == 0)
+				ret = -EAGAIN;
+			break;
+		}

-		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-		 * gone. */
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
+		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+			ret = 0;
+			break;
+		}

+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (timeout_jiffies <= 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		timer.function = NULL;
+		if (timeout || missed_irq(dev_priv, ring)) {
+			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+			expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+			mod_timer(&timer, expire);
+		}
+
+		io_schedule();
+
+		if (timeout)
+			timeout_jiffies = expire - jiffies;
+
+		if (timer.function) {
+			del_singleshot_timer_sync(&timer);
+			destroy_timer_on_stack(&timer);
+		}
+	}
	getrawmonotonic(&now);
+	trace_i915_gem_request_wait_end(ring, seqno);

	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			set_normalized_timespec(timeout, 0, 0);
	}

-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
+	return ret;
 }

 /**
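
The rewritten __wait_seqno above trades wait_event_*_timeout() for an open-coded prepare_to_wait()/io_schedule() loop, so each wakeup can be classified (reset, completion, signal, timeout) and a stack timer can stand in for missed interrupts. The bare skeleton of that pattern, with done() as a hypothetical stand-in for the seqno check:

DEFINE_WAIT(wait);
int ret;

for (;;) {
	prepare_to_wait(&queue, &wait,
			interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
	if (done()) {			/* e.g. i915_seqno_passed() */
		ret = 0;
		break;
	}
	if (interruptible && signal_pending(current)) {
		ret = -ERESTARTSYS;
		break;
	}
	io_schedule();			/* sleep until an irq (or the fake_irq timer) wakes us */
}
finish_wait(&queue, &wait);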
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL);
+			    interruptible, NULL, NULL);
 }

 static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct drm_file *file,
					    bool readonly)
 {
	struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
	if (ret)
		goto unref;

@@ -1690,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }

-static long
+static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
 {
	struct list_head still_bound_list;
	struct drm_i915_gem_object *obj, *next;
-	long count = 0;
+	unsigned long count = 0;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 	return count;
 }

-static long
+static unsigned long
 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 {
	return __i915_gem_shrink(dev_priv, target, true);
 }

-static long
+static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
	struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)

	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
				 global_list) {
-		if (obj->pages_pin_count == 0)
+		if (i915_gem_object_put_pages(obj) == 0)
			freed += obj->base.size >> PAGE_SHIFT;
-		i915_gem_object_put_pages(obj);
	}
	return freed;
 }
@@ -1865,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
 #ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
@@ -1918,7 +1958,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }

-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
 {
@@ -1957,6 +1997,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 }

+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2078,11 +2125,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	if (ret)
		return ret;

-	request = kmalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
		return -ENOMEM;

-
	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring);
-	if (ret) {
-		kfree(request);
+	if (ret)
		return ret;
-	}

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->head = request_start;
	request->tail = request_ring_position;
-	request->ctx = ring->last_context;
-	request->batch_obj = obj;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
+	request->batch_obj = obj;

+	/* Hold a reference to the current context so that we can inspect
+	 * it later in case a hangcheck error event fires.
+	 */
+	request->ctx = ring->last_context;
	if (request->ctx)
		i915_gem_context_reference(request->ctx);

@@ -2129,12 +2176,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	}

	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_request = 0;
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;

	if (!dev_priv->ums.mm_suspended) {
		i915_queue_hangcheck(ring->dev);

		if (was_empty) {
+			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 		return;

	spin_lock(&file_priv->mm.lock);
-	if (request->file_priv) {
-		list_del(&request->client_list);
-		request->file_priv = NULL;
-	}
+	list_del(&request->client_list);
+	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
 }

@@ -2224,6 +2271,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
 	return false;
 }

+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+	const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+	if (hs->banned)
+		return true;
+
+	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+		DRM_ERROR("context hanging too fast, declaring banned!\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
				  struct drm_i915_gem_request *request,
				  u32 acthd)
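
i915_context_is_banned() above is a simple rate limit on repeat offenders: once a context has been guilty of a hang, a second guilty hang within DRM_I915_CTX_BAN_PERIOD seconds of the recorded guilty_ts flips it to banned. For example, with an assumed ban period of 6 seconds, a context guilty at t=100s and again at t=104s is banned (elapsed 4 <= 6), while a repeat at t=110s is merely counted.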
@@ -2260,10 +2322,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 		hs = &request->file_priv->hang_stats;

	if (hs) {
-		if (guilty)
+		if (guilty) {
+			hs->banned = i915_context_is_banned(hs);
			hs->batch_active++;
-		else
+			hs->guilty_ts = get_seconds();
+		} else {
			hs->batch_pending++;
+		}
	}
 }

@@ -2341,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);

+	i915_gem_cleanup_ringbuffer(dev);
+
	i915_gem_restore_fences(dev);
 }

@@ -2405,57 +2472,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 	WARN_ON(i915_verify_lists(ring->dev));
 }

-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
+	bool idle = true;
	int i;

-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(ring, dev_priv, i) {
		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				   &dev_priv->mm.idle_work,
+				   msecs_to_jiffies(100));
+
+	return idle;
 }
 }

 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	struct drm_device *dev;
-	struct intel_ring_buffer *ring;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
 	bool idle;
 	bool idle;
-
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.retire_work.work);
-	dev = dev_priv->dev;
 

	/* Come back later if the device is busy... */
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-		return;
-	}
-
-	i915_gem_retire_requests(dev);
-
-	/* Send a periodic flush down the ring so we don't hold onto GEM
-	 * objects indefinitely.
-	 */
-	idle = true;
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL);
-
-		idle &= list_empty(&ring->request_list);
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
 	}
 	}
-
+	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
-	if (idle)
-		intel_mark_idle(dev);
+}
-	mutex_unlock(&dev->struct_mutex);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+	intel_mark_idle(dev_priv->dev);
 }
 }

 /**
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
	if (timeout)
		args->timeout_ns = timespec_to_ns(timeout);
	return ret;
 	if (ret)
 	if (ret)
		return ret;

+	trace_i915_gem_ring_sync_to(from, to, seqno);
	ret = to->sync_to(to, from, seqno);
	if (!ret)
		/* We use last_read_seqno because sync_to()
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret;

+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
	if (list_empty(&vma->vma_link))
		return 0;

-	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+
+		return 0;
+	}

	if (obj->pin_count)
		return -EBUSY;
@@ -2685,13 +2755,10 @@ int i915_vma_unbind(struct i915_vma *vma)

	drm_mm_remove_node(&vma->node);

-destroy:
	i915_gem_vma_destroy(vma);

	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 * NB: Until we have real VMAs there will only ever be one */
-	WARN_ON(!list_empty(&obj->vma_list));
+	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);

@@ -2887,6 +2954,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 	     obj->stride, obj->tiling_mode);

	switch (INTEL_INFO(dev)->gen) {
+	case 8:
	case 7:
	case 6:
	case 5:
@@ -3389,8 +3457,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
+		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
		if (vma)
			list_move_tail(&vma->mm_list,
				       &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3828,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
		return 0;

-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

@@ -3865,6 +3932,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		goto out;
	}

+	if (obj->user_pin_count == ULONG_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
	if (obj->user_pin_count == 0) {
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
		if (ret)
@@ -4015,7 +4087,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);

@@ -4087,13 +4158,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	return obj;
 }

-int i915_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,8 +4211,19 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_free(obj);
 }

-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == vm)
+			return vma;
+
+	return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+					      struct i915_address_space *vm)
 {
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
 	return vma;
 	return vma;
 }

+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
	list_del(&vma->vma_link);
+
	kfree(vma);
 }

 int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;

-	if (dev_priv->ums.mm_suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->ums.mm_suspended)
+		goto err;

	ret = i915_gpu_idle(dev);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	if (ret)
+		goto err;
+
 	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

-	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

-	/* Cancel the retire work handler, which should be idle now. */
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
+	 */
+	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+							     DRIVER_MODESET);
+	mutex_unlock(&dev->struct_mutex);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);

	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }

-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 misccpctl;
-	int i;
-
-	if (!HAS_L3_GPU_CACHE(dev))
-		return;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
-	if (!dev_priv->l3_parity.remap_info)
-		return;
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
+	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG("0x%x was already programmed to %x\n",
-				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
 	}

-	/* Make sure all the writes land before disabling dop clock gating */
-	POSTING_READ(GEN7_L3LOG_BASE);
+	intel_ring_advance(ring);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	return ret;
 }

 void i915_gem_init_swizzling(struct drm_device *dev)
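
The remap now reaches GEN7_L3LOG_BASE through the render ring rather than direct MMIO: intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3) reserves exactly three dwords per 4-byte log entry — the MI_LOAD_REGISTER_IMM(1) header, the register offset reg_base + i, and the replacement value remap_info[i/4] — so the whole per-slice log is written as one batch of LRI packets on the ring.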
@@ -4260,6 +4362,8 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else if (IS_GEN8(dev))
+		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
 }
@@ -4330,7 +4434,7 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;
@@ -4338,20 +4442,26 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

+	if (IS_HSW_GT3(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+	else
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
	if (HAS_PCH_NOP(dev)) {
		u32 temp = I915_READ(GEN7_MSG_CTL);
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
		I915_WRITE(GEN7_MSG_CTL, temp);
	}

-	i915_gem_l3_remap(dev);
-
	i915_gem_init_swizzling(dev);

	ret = i915_gem_init_rings(dev);
	if (ret)
		return ret;

+	for (i = 0; i < NUM_L3_SLICES(dev); i++)
+		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
	/*
	 * XXX: There was some w/a described somewhere suggesting loading
	 * contexts before PPGTT.
@@ -4454,26 +4564,12 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);

-	mutex_lock(&dev->struct_mutex);
-	ret =  i915_gem_idle(dev);
-
-	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound ums.mm_suspended!
-	 */
-	if (ret != 0)
-		dev_priv->ums.mm_suspended = 1;
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return i915_gem_suspend(dev);
 }

 void
@@ -4484,11 +4580,9 @@ i915_gem_lastclose(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	mutex_unlock(&dev->struct_mutex);
 }

 static void
@@ -4523,6 +4617,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

+	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4627,8 @@ i915_gem_load(struct drm_device *dev)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
+	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4679,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
 	if (dev_priv->mm.phys_objs[id - 1] || !size)
 		return 0;
 
-	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
 	if (!phys_obj)
 		return -ENOMEM;
 
@@ -4756,6 +4853,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
@@ -4773,6 +4872,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_file_private *file_priv =
+		container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+	atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+	file_priv->dev_priv = dev->dev_private;
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+			  i915_gem_file_idle_work_handler);
+
+	idr_init(&file_priv->context_idr);
+
+	return 0;
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
@@ -4823,6 +4954,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return count;
 }
 
@@ -4859,11 +4991,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_vma *vma;
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		if (i915_gem_obj_bound(o, vm))
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
 	return false;
@@ -4895,7 +5026,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed;
 	bool unlock = true;
 
@@ -4909,38 +5039,30 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		unlock = false;
 	}
 
-	freed = i915_gem_purge(dev_priv, nr_to_scan);
-	if (freed < nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv, nr_to_scan,
-							false);
-	if (freed < nr_to_scan)
+	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	if (freed < sc->nr_to_scan)
+		freed += __i915_gem_shrink(dev_priv,
+					   sc->nr_to_scan - freed,
+					   false);
+	if (freed < sc->nr_to_scan)
 		freed += i915_gem_shrink_all(dev_priv);
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return freed;
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm)
-			return vma;
 
-	return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
+	if (WARN_ON(list_empty(&obj->vma_list)))
+		return NULL;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = i915_gem_vma_create(obj, vm);
+	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+		return NULL;
 
 	return vma;
 }

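The shrinker rework above fixes a subtle accounting bug: each fallback stage used to be handed the full nr_to_scan budget instead of only what was still outstanding. A minimal userspace sketch of the corrected staged accounting follows; purge(), shrink() and shrink_all() are hypothetical stand-ins for i915_gem_purge(), __i915_gem_shrink() and i915_gem_shrink_all(), not kernel API.

#include <stdio.h>

/* Hypothetical reclaim stages; each returns how many pages it
 * managed to free within the budget it was given. */
static unsigned long purge(unsigned long budget)  { return budget / 2; }
static unsigned long shrink(unsigned long budget) { return budget / 2; }
static unsigned long shrink_all(void)             { return 64; }

static unsigned long scan(unsigned long nr_to_scan)
{
	unsigned long freed;

	/* Stage 1: drop purgeable objects first. */
	freed = purge(nr_to_scan);

	/* Stage 2: ask only for the *remaining* budget; the bug fixed
	 * above was passing the full nr_to_scan here a second time. */
	if (freed < nr_to_scan)
		freed += shrink(nr_to_scan - freed);

	/* Stage 3: last resort, shrink everything shrinkable. */
	if (freed < nr_to_scan)
		freed += shrink_all();

	return freed;
}

int main(void)
{
	printf("freed %lu of 128 requested\n", scan(128));
	return 0;
}
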
+ 40 - 24
drivers/gpu/drm/i915/i915_gem_context.c

@@ -73,7 +73,7 @@
  *
  * There are two confusing terms used above:
  *  The "current context" means the context which is currently running on the
- *  GPU. The GPU has loaded it's state already and has stored away the gtt
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
  *  offset of the BO. The GPU is not actively referencing the data at this
  *  offset, but it will on the next context switch. The only way to avoid this
  *  is to do a GPU reset.
@@ -117,6 +117,9 @@ static int get_context_size(struct drm_device *dev)
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
+	case 8:
+		ret = GEN8_CXT_TOTAL_SIZE;
+		break;
 	default:
 		BUG();
 	}
@@ -129,6 +132,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 
+	list_del(&ctx->link);
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
@@ -147,6 +151,7 @@ create_hw_context(struct drm_device *dev,
 
 	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	INIT_LIST_HEAD(&ctx->link);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +171,7 @@ create_hw_context(struct drm_device *dev,
 	 * assertion in the context switch code.
 	 */
 	ctx->ring = &dev_priv->ring[RCS];
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
@@ -178,6 +184,10 @@ create_hw_context(struct drm_device *dev,
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
 	return ctx;
 
@@ -213,7 +223,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
-	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +235,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 		goto err_unpin;
 	}
 
+	dev_priv->ring[RCS].default_context = ctx;
+
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
 
@@ -281,16 +292,24 @@ void i915_gem_context_fini(struct drm_device *dev)
 	 * other code, leading to spurious errors. */
 	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dctx->obj);
-
 	/* When default context is created and switched to, base object refcount
 	 * will be 2 (+1 from object creation and +1 from do_switch()).
 	 * i915_gem_context_fini() will be called after gpu_idle() has switched
 	 * to default context. So we need to unreference the base object once
 	 * to offset the do_switch part, so that i915_gem_context_unreference()
 	 * can then free the base object correctly. */
-	drm_gem_object_unreference(&dctx->obj->base);
+	WARN_ON(!dev_priv->ring[RCS].last_context);
+	if (dev_priv->ring[RCS].last_context == dctx) {
+		/* Fake switch to NULL context */
+		WARN_ON(dctx->obj->active);
+		i915_gem_object_unpin(dctx->obj);
+		i915_gem_context_unreference(dctx);
+	}
+
+	i915_gem_object_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
+	dev_priv->ring[RCS].default_context = NULL;
+	dev_priv->ring[RCS].last_context = NULL;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +412,11 @@ static int do_switch(struct i915_hw_context *to)
 	struct intel_ring_buffer *ring = to->ring;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
-	int ret;
+	int ret, i;
 
 	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from == to)
+	if (from == to && !to->remap_slice)
 		return 0;
 
 	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +439,6 @@ static int do_switch(struct i915_hw_context *to)
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
@@ -429,6 +446,18 @@ static int do_switch(struct i915_hw_context *to)
 		return ret;
 	}
 
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(ring, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
@@ -436,11 +465,8 @@ static int do_switch(struct i915_hw_context *to)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -451,17 +477,7 @@ static int do_switch(struct i915_hw_context *to)
 		from->obj->dirty = 1;
 		BUG_ON(from->obj->ring != ring);
 
-		ret = i915_add_request(ring, NULL);
-		if (ret) {
-			/* Too late, we've already scheduled a context switch.
-			 * Try to undo the change so that the hw state is
-			 * consistent with out tracking. In case of emergency,
-			 * scream.
-			 */
-			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-			return ret;
-		}
-
+		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_unpin(from->obj);
 		i915_gem_context_unreference(from);
 	}

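The context hunks above track outstanding L3 remaps with a per-context bitmask: create_hw_context() marks every slice pending, and do_switch() retries only the set bits, clearing each one when the remap sticks. A small self-contained sketch of that idiom; remap_slice() and NUM_SLICES are hypothetical stand-ins for i915_gem_l3_remap() and NUM_L3_SLICES(dev).

#include <stdio.h>

#define NUM_SLICES 2	/* stand-in for NUM_L3_SLICES(dev) */

/* Hypothetical remap operation; returns 0 on success. */
static int remap_slice(int slice)
{
	(void)slice;
	return 0;
}

int main(void)
{
	/* Context creation: every slice starts out pending, mirroring
	 * ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1 above. */
	unsigned int remap_pending = (1u << NUM_SLICES) - 1;
	int i;

	/* Context switch: retry only the still-pending slices and clear
	 * each bit once the remap succeeds, as in do_switch(). A failed
	 * slice keeps its bit set and is retried on the next switch. */
	for (i = 0; i < NUM_SLICES; i++) {
		if (!(remap_pending & (1u << i)))
			continue;
		if (remap_slice(i) == 0)
			remap_pending &= ~(1u << i);
	}

	printf("pending mask after switch: %#x\n", remap_pending);
	return 0;
}
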
+ 43 - 7
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 	if (vma->obj->pin_count)
 		return false;
 
+	if (WARN_ON(!list_empty(&vma->exec_list)))
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
@@ -113,7 +116,7 @@ none:
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_everything() is unnecessary.
+	 * So calling i915_gem_evict_vm() is unnecessary.
 	 */
 	return -ENOSPC;
 
@@ -152,12 +155,48 @@ found:
 	return ret;
 }
 
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants fine
+ * grained eviction, they should see evict something for more details. In terms
+ * of freeing up actual system memory, this function may not accomplish the
+ * desired result. An object may be shared in multiple address space, and this
+ * function will not assert those objects be freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires, and
+ * inactivates current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	trace_i915_gem_evict_vm(vm);
+
+	if (do_idle) {
+		ret = i915_gpu_idle(vm->dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(vm->dev);
+	}
+
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+		if (vma->obj->pin_count == 0)
+			WARN_ON(i915_vma_unbind(vma));
+
+	return 0;
+}
+
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm;
-	struct i915_vma *vma, *next;
 	bool lists_empty = true;
 	int ret;
 
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-			if (vma->obj->pin_count == 0)
-				WARN_ON(i915_vma_unbind(vma));
-	}
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
 }

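i915_gem_evict_vm() above frees address space, not system memory: it walks a VM's inactive list, unbinds only unpinned VMAs, and optionally idles the GPU first so active VMAs can retire onto that list. A toy userspace sketch of that filter under those assumptions; struct vma and evict_vm() here are illustrative stand-ins, not the kernel types.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for i915_vma: only unpinned, inactive
 * mappings may be unbound, the same filter i915_gem_evict_vm()
 * applies to a VM's inactive list above. */
struct vma {
	int pin_count;
	bool bound;
};

static void evict_vm(struct vma *vmas, int n, bool do_idle)
{
	int i;

	if (do_idle) {
		/* In the kernel this step is i915_gpu_idle() plus
		 * i915_gem_retire_requests(), which moves active VMAs
		 * onto the inactive list so they become evictable. */
	}

	for (i = 0; i < n; i++)
		if (vmas[i].pin_count == 0)
			vmas[i].bound = false;	/* i915_vma_unbind() */
}

int main(void)
{
	struct vma vmas[3] = { {0, true}, {1, true}, {0, true} };

	evict_vm(vmas, 3, true);
	printf("still bound: %d %d %d\n",
	       vmas[0].bound, vmas[1].bound, vmas[2].bound);
	return 0;
}
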
+ 243 - 158
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -33,35 +33,35 @@
 #include "intel_drv.h"
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 #include <linux/dma_remapping.h>
 
 
-struct eb_objects {
-	struct list_head objects;
+struct eb_vmas {
+	struct list_head vmas;
 	int and;
 	int and;
 	union {
 	union {
-		struct drm_i915_gem_object *lut[0];
+		struct i915_vma *lut[0];
 		struct hlist_head buckets[0];
 		struct hlist_head buckets[0];
 	};
 	};
 };
 };
 
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
 {
-	struct eb_objects *eb = NULL;
+	struct eb_vmas *eb = NULL;
 
 
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
-		int size = args->buffer_count;
-		size *= sizeof(struct drm_i915_gem_object *);
-		size += sizeof(struct eb_objects);
+		unsigned size = args->buffer_count;
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	}
 	}
 
 
 	if (eb == NULL) {
 	if (eb == NULL) {
-		int size = args->buffer_count;
-		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		unsigned size = args->buffer_count;
+		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
 		while (count > 2*size)
 		while (count > 2*size)
 			count >>= 1;
 			count >>= 1;
 		eb = kzalloc(count*sizeof(struct hlist_head) +
 		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_objects),
+			     sizeof(struct eb_vmas),
 			     GFP_TEMPORARY);
 			     GFP_TEMPORARY);
 		if (eb == NULL)
 		if (eb == NULL)
 			return eb;
 			return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
 	} else
 		eb->and = -args->buffer_count;
 
-	INIT_LIST_HEAD(&eb->objects);
+	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
 	if (eb->and >= 0)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-		  struct drm_i915_gem_exec_object2 *exec,
-		  const struct drm_i915_gem_execbuffer2 *args,
-		  struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret = 0;
 
+	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can lookup
+	 * or create the VMA without using GFP_ATOMIC */
 	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 		if (obj == NULL) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
-			return -ENOENT;
+			ret = -ENOENT;
+			goto out;
 		}
 
-		if (!list_empty(&obj->exec_list)) {
+		if (!list_empty(&obj->obj_exec_link)) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		drm_gem_object_reference(&obj->base);
-		list_add_tail(&obj->exec_list, &eb->objects);
+		list_add_tail(&obj->obj_exec_link, &objects);
+	}
+	spin_unlock(&file->table_lock);
 
-		obj->exec_entry = &exec[i];
+	i = 0;
+	list_for_each_entry(obj, &objects, obj_exec_link) {
+		struct i915_vma *vma;
+
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto out;
+		}
+
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
-			eb->lut[i] = obj;
+			eb->lut[i] = vma;
 		} else {
 			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			obj->exec_handle = handle;
-			hlist_add_head(&obj->exec_node,
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
 				       &eb->buckets[handle & eb->and]);
 		}
+		++i;
 	}
-	spin_unlock(&file->table_lock);
 
 
-	return 0;
+
+out:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		if (ret)
+			drm_gem_object_unreference(&obj->base);
+	}
+	return ret;
 }
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
 		if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 
 		head = &eb->buckets[handle & eb->and];
 		hlist_for_each(node, head) {
-			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma;
 
-			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-			if (obj->exec_handle == handle)
-				return obj;
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
 		}
 		return NULL;
 	}
 }
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-	while (!list_empty(&eb->objects)) {
-		struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb) {
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
 
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
 				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+	return (HAS_LLC(obj->base.dev) ||
+		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -175,17 +212,31 @@ static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc)
 {
+	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	char *vaddr;
 	int ret = -EINVAL;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
 
 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 				reloc->offset >> PAGE_SHIFT));
 	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+		if (page_offset == 0) {
+			kunmap_atomic(vaddr);
+			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+		}
+
+		*(uint32_t *)(vaddr + page_offset) = 0;
+	}
+
 	kunmap_atomic(vaddr);
 
 	return 0;
@@ -216,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	reloc_entry = (uint32_t __iomem *)
 		(reloc_page + offset_in_page(reloc->offset));
 	iowrite32(reloc->delta, reloc_entry);
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		reloc_entry += 1;
+
+		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+			io_mapping_unmap_atomic(reloc_page);
+			reloc_page = io_mapping_map_atomic_wc(
+					dev_priv->gtt.mappable,
+					reloc->offset + sizeof(uint32_t));
+			reloc_entry = reloc_page;
+		}
+
+		iowrite32(0, reloc_entry);
+	}
+
 	io_mapping_unmap_atomic(reloc_page);
 
 	return 0;
@@ -223,22 +289,24 @@
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_objects *eb,
+				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
 	uint32_t target_offset;
 	int ret = -EINVAL;
 
 	/* we've already hold a reference to all valid objects */
-	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-	if (unlikely(target_obj == NULL))
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
 		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
 
-	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -284,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return 0;
 
 	/* Check that the relocation address is valid... */
-	if (unlikely(reloc->offset > obj->base.size - 4)) {
+	if (unlikely(reloc->offset >
+		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
@@ -320,14 +389,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int remain, ret;
 
 	user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +414,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-								 vm);
+			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+								 vma->vm);
 			if (ret)
 				return ret;
 
@@ -368,17 +436,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs,
-					 struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
 {
-	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+							 vma->vm);
 		if (ret)
 			return ret;
 	}
@@ -387,10 +454,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 			     struct i915_address_space *vm)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +468,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 	 * lockdep complains vehemently.
 	 */
 	pagefault_disable();
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
@@ -415,31 +482,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(obj);
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+		i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-				   struct intel_ring_buffer *ring,
-				   struct i915_address_space *vm,
-				   bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+				struct intel_ring_buffer *ring,
+				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
+	struct drm_i915_gem_object *obj = vma->obj;
 	int ret;
 
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(obj);
+	need_mappable = need_fence || need_reloc_mappable(vma);
 
-	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
 				  false);
 	if (ret)
 		return ret;
@@ -467,8 +535,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-		entry->offset = i915_gem_obj_offset(obj, vm);
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
 		*need_reloc = true;
 	}
 
@@ -485,14 +553,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (!i915_gem_obj_bound_any(obj))
+	if (!drm_mm_node_allocated(&vma->node))
 		return;
 
-	entry = obj->exec_entry;
+	entry = vma->exec_entry;
 
 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 		i915_gem_object_unpin_fence(obj);
@@ -505,41 +574,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-			    struct list_head *objects,
-			    struct i915_address_space *vm,
+			    struct list_head *vmas,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
-	struct list_head ordered_objects;
+	struct i915_vma *vma;
+	struct i915_address_space *vm;
+	struct list_head ordered_vmas;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	INIT_LIST_HEAD(&ordered_objects);
-	while (!list_empty(objects)) {
+	if (list_empty(vmas))
+		return 0;
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+	INIT_LIST_HEAD(&ordered_vmas);
+	while (!list_empty(vmas)) {
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		obj = list_first_entry(objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		entry = obj->exec_entry;
+		vma = list_first_entry(vmas, struct i915_vma, exec_list);
+		obj = vma->obj;
+		entry = vma->exec_entry;
 
 		need_fence =
 			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(obj);
+		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (need_mappable)
-			list_move(&obj->exec_list, &ordered_objects);
+			list_move(&vma->exec_list, &ordered_vmas);
 		else
-			list_move_tail(&obj->exec_list, &ordered_objects);
+			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
 		obj->pending_fenced_gpu_access = false;
 	}
-	list_splice(&ordered_objects, objects);
+	list_splice(&ordered_vmas, vmas);
 
 	/* Attempt to pin all of the buffers into the GTT.
 	 * This is done in 3 phases:
@@ -558,52 +632,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+		list_for_each_entry(vma, vmas, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 			bool need_fence, need_mappable;
-			u32 obj_offset;
 
-			if (!i915_gem_obj_bound(obj, vm))
+			obj = vma->obj;
+
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			obj_offset = i915_gem_obj_offset(obj, vm);
 			need_fence =
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(obj);
+			need_mappable = need_fence || need_reloc_mappable(vma);
 
 			WARN_ON((need_mappable || need_fence) &&
-				!i915_is_ggtt(vm));
+			       !i915_is_ggtt(vma->vm));
 
 			if ((entry->alignment &&
-			     obj_offset & (entry->alignment - 1)) ||
+			     vma->node.start & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
-				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			if (i915_gem_obj_bound(obj, vm))
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list)
-			i915_gem_execbuffer_unreserve_object(obj);
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		ret = i915_gem_evict_everything(ring->dev);
+		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
 	} while (1);
@@ -614,24 +688,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
-				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  struct i915_address_space *vm)
+				  struct eb_vmas *eb,
+				  struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
-	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
 	bool need_relocs;
 	int *reloc_offset;
 	int i, total, ret;
-	int count = args->buffer_count;
+	unsigned count = args->buffer_count;
+
+	if (WARN_ON(list_empty(&eb->vmas)))
+		return 0;
+
+	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
 	/* We may process another execbuffer during the unlock... */
-	while (!list_empty(&eb->objects)) {
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+	while (!list_empty(&eb->vmas)) {
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -695,20 +772,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		int offset = obj->exec_entry - exec;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + reloc_offset[offset],
-							       vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		int offset = vma->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+							    reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
 	}
@@ -727,14 +803,15 @@ err:
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-				struct list_head *objects)
+				struct list_head *vmas)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
 	int ret;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
@@ -771,8 +848,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 		   int count)
 {
 	int i;
-	int relocs_total = 0;
-	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+	unsigned relocs_total = 0;
+	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +886,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 }
 
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
@@ -825,9 +902,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		/* FIXME: This lookup gets fixed later <-- danvet */
-		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +960,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct i915_address_space *vm)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct eb_objects *eb;
+	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
+	struct i915_ctx_hang_stats *hs;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 mask, flags;
@@ -1000,7 +1076,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			return -EINVAL;
 		}
 
-		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+		cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
@@ -1025,7 +1102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
-	eb = eb_create(args);
+	eb = eb_create(args, vm);
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
@@ -1033,18 +1110,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Look up object handles */
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->objects.prev,
-			       struct drm_i915_gem_object,
-			       exec_list);
+	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1054,7 +1129,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec, vm);
+								eb, exec);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
@@ -1071,15 +1146,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
-	 * hsw should have this fixed, but let's be paranoid and do it
-	 * unconditionally for now. */
+	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
 		goto err;
 
+	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+	if (IS_ERR(hs)) {
+		ret = PTR_ERR(hs);
+		goto err;
+	}
+
+	if (hs->banned) {
+		ret = -EIO;
+		goto err;
+	}
+
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1131,7 +1216,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:

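Several hunks above grow the relocation paths for gen8, where a relocated address is 64 bits wide: the two dwords of a write can straddle a page boundary, and the second page must then be mapped separately. A minimal userspace sketch of that boundary handling, using fixed-size fake pages in place of kmap_atomic(); write_reloc() and the page array are hypothetical names for illustration only.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096
#define offset_in_page(off) ((off) & (PAGE_SIZE - 1))

/* Two fake backing pages standing in for the object's shmem pages. */
static uint8_t pages[2][PAGE_SIZE];

/* A gen8 relocation writes a 64-bit address as two dwords; when the
 * low dword ends a page, the high dword lands on the next page, which
 * relocate_entry_cpu() above handles by kmapping that next page. */
static void write_reloc(uint64_t offset, uint64_t value)
{
	uint32_t lo = (uint32_t)value, hi = (uint32_t)(value >> 32);
	unsigned int page = (unsigned int)(offset / PAGE_SIZE);
	unsigned int off = (unsigned int)offset_in_page(offset);

	memcpy(&pages[page][off], &lo, sizeof(lo));

	off = offset_in_page(off + sizeof(uint32_t));
	if (off == 0)
		page++;		/* "unmap old page, map the next one" */
	memcpy(&pages[page][off], &hi, sizeof(hi));
}

int main(void)
{
	write_reloc(PAGE_SIZE - 4, 0x1122334455667788ull);
	printf("last byte of page 0: %02x, first byte of page 1: %02x\n",
	       pages[0][PAGE_SIZE - 1], pages[1][0]);
	return 0;
}
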
+ 488 - 20
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -30,6 +30,8 @@
 
 #define GEN6_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+typedef uint64_t gen8_gtt_pte_t;
+typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
@@ -57,6 +59,41 @@
 #define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
 
+#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
+#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPS		4
+
+#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
+#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
+
+static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
+					     enum i915_cache_level level,
+					     bool valid)
+{
+	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+	pte |= addr;
+	if (level != I915_CACHE_NONE)
+		pte |= PPAT_CACHED_INDEX;
+	else
+		pte |= PPAT_UNCACHED_INDEX;
+	return pte;
+}
+
+static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
+					     dma_addr_t addr,
+					     enum i915_cache_level level)
+{
+	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+	pde |= addr;
+	if (level != I915_CACHE_NONE)
+		pde |= PPAT_CACHED_PDE_INDEX;
+	else
+		pde |= PPAT_UNCACHED_INDEX;
+	return pde;
+}
+
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level,
 				     bool valid)
@@ -158,6 +195,257 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
+/* Broadwell Page Directory Pointer Descriptors */
+static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
+			   uint64_t val)
+{
+	int ret;
+
+	BUG_ON(entry >= 4);
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit(ring, (u32)(val >> 32));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit(ring, (u32)(val));
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int gen8_ppgtt_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int i, j, ret;
+
+	/* bit of a hack to find the actual last used pd */
+	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+
+	for_each_ring(ring, dev_priv, j) {
+		I915_WRITE(RING_MODE_GEN7(ring),
+			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	}
+
+	for (i = used_pd - 1; i >= 0; i--) {
+		dma_addr_t addr = ppgtt->pd_dma_addr[i];
+		for_each_ring(ring, dev_priv, j) {
+			ret = gen8_write_pdp(ring, i, addr);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+				   unsigned first_entry,
+				   unsigned num_entries,
+				   bool use_scratch)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
+	unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;
+	unsigned last_pte, i;
+
+	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+				      I915_CACHE_LLC, use_scratch);
+
+	while (num_entries) {
+		struct page *page_table = &ppgtt->gen8_pt_pages[act_pt];
+
+		last_pte = first_pte + num_entries;
+		if (last_pte > GEN8_PTES_PER_PAGE)
+			last_pte = GEN8_PTES_PER_PAGE;
+
+		pt_vaddr = kmap_atomic(page_table);
+
+		for (i = first_pte; i < last_pte; i++)
+			pt_vaddr[i] = scratch_pte;
+
+		kunmap_atomic(pt_vaddr);
+
+		num_entries -= last_pte - first_pte;
+		first_pte = 0;
+		act_pt++;
+	}
+}
+
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+				      struct sg_table *pages,
+				      unsigned first_entry,
+				      enum i915_cache_level cache_level)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_gtt_pte_t *pt_vaddr;
+	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
+	unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE;
+	struct sg_page_iter sg_iter;
+
+	pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+		dma_addr_t page_addr;
+
+		page_addr = sg_dma_address(sg_iter.sg) +
+				(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level,
+						    true);
+		if (++act_pte == GEN8_PTES_PER_PAGE) {
+			kunmap_atomic(pt_vaddr);
+			act_pt++;
+			pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]);
+			act_pte = 0;
+
+		}
+	}
+	kunmap_atomic(pt_vaddr);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	int i, j;
+
+	for (i = 0; i < ppgtt->num_pd_pages ; i++) {
+		if (ppgtt->pd_dma_addr[i]) {
+			pci_unmap_page(ppgtt->base.dev->pdev,
+				       ppgtt->pd_dma_addr[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+			for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+				dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+				if (addr)
+					pci_unmap_page(ppgtt->base.dev->pdev,
+						       addr,
+						       PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+
+			}
+		}
+		kfree(ppgtt->gen8_pt_dma_addr[i]);
+	}
+
+	__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
+	__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+}
+
+/**
+ * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a
+ * net effect resembling a 2-level page table in normal x86 terms. Each PDP
+ * represents 1GB of memory: 4 PDPs * 512 PDEs * 512 PTEs * 4096 bytes = 4GB
+ * of legacy 32b address space.
+ *
+ * TODO: Do something with the size parameter
+ */
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+{
+	struct page *pt_pages;
+	int i, j, ret = -ENOMEM;
+	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
+	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+
+	if (size % (1<<30))
+		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+
+	/* FIXME: split allocation into smaller pieces. For now we only ever do
+	 * this once, but with full PPGTT, the multiple contiguous allocations
+	 * will be bad.
+	 */
+	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
+	if (!ppgtt->pd_pages)
+		return -ENOMEM;
+
+	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
+	if (!pt_pages) {
+		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
+		return -ENOMEM;
+	}
+
+	ppgtt->gen8_pt_pages = pt_pages;
+	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
+	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+	ppgtt->enable = gen8_ppgtt_enable;
+	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+
+	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+	/*
+	 * - Create a mapping for the page directories.
+	 * - For each page directory:
+	 *      allocate space for page table mappings.
+	 *      map each page table
+	 */
+	for (i = 0; i < max_pdp; i++) {
+		dma_addr_t temp;
+		temp = pci_map_page(ppgtt->base.dev->pdev,
+				    &ppgtt->pd_pages[i], 0,
+				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
+			goto err_out;
+
+		ppgtt->pd_dma_addr[i] = temp;
+
+		ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL);
+		if (!ppgtt->gen8_pt_dma_addr[i])
+			goto err_out;
+
+		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
+			temp = pci_map_page(ppgtt->base.dev->pdev,
+					    p, 0, PAGE_SIZE,
+					    PCI_DMA_BIDIRECTIONAL);
+
+			if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp))
+				goto err_out;
+
+			ppgtt->gen8_pt_dma_addr[i][j] = temp;
+		}
+	}
+
+	/* For now, the PPGTT helper functions all require that the PDEs are
+	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
+	 * will never need to touch the PDEs again */
+	for (i = 0; i < max_pdp; i++) {
+		gen8_ppgtt_pde_t *pd_vaddr;
+		pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
+		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
+			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
+						      I915_CACHE_LLC);
+		}
+		kunmap_atomic(pd_vaddr);
+	}
+
+	ppgtt->base.clear_range(&ppgtt->base, 0,
+				ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE,
+				true);
+
+	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
+			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
+	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
+			 ppgtt->num_pt_pages,
+			 (ppgtt->num_pt_pages - num_pt_pages) +
+			 size % (1<<30));
+	return 0;
+
+err_out:
+	ppgtt->base.cleanup(&ppgtt->base);
+	return ret;
+}
+
 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
@@ -342,7 +630,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
 				  GFP_KERNEL);
 	if (!ppgtt->pt_pages)
 		return -ENOMEM;
@@ -353,7 +641,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			goto err_pt_alloc;
 	}
 
-	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
 				     GFP_KERNEL);
 	if (!ppgtt->pt_dma_addr)
 		goto err_pt_alloc;
@@ -410,6 +698,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 
 	if (INTEL_INFO(dev)->gen < 8)
 		ret = gen6_ppgtt_init(ppgtt);
+	else if (IS_GEN8(dev))
+		ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 	else
 		BUG();
 
@@ -573,6 +863,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
+{
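+	/* GEN8 PTEs are 64 bits wide; without writeq (32-bit builds) fall
+	 * back to two 32-bit writes.
+	 */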
+#ifdef writeq
+	writeq(pte, addr);
+#else
+	iowrite32((u32)pte, addr);
+	iowrite32(pte >> 32, addr + 4);
+#endif
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+				     struct sg_table *st,
+				     unsigned int first_entry,
+				     enum i915_cache_level level)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	gen8_gtt_pte_t __iomem *gtt_entries =
+		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+	int i = 0;
+	struct sg_page_iter sg_iter;
+	dma_addr_t addr;
+
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+		addr = sg_dma_address(sg_iter.sg) +
+			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		gen8_set_pte(&gtt_entries[i],
+			     gen8_pte_encode(addr, level, true));
+		i++;
+	}
+
+	/*
+	 * XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR, they could still be
+	 * subject to different (NUMA-like) access behaviour. So even with the
+	 * way we assume the hardware should work, keep the posting read for
+	 * paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readq(&gtt_entries[i-1])
+			!= gen8_pte_encode(addr, level, true));
+
+#if 0 /* TODO: Still needed on GEN8? */
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+#endif
+}
+
 /*
  * Binds an object into the global gtt with the specified cache level. The object
  * will be accessible to the GPU via commands whose operands reference offsets
@@ -615,6 +956,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+				  unsigned int first_entry,
+				  unsigned int num_entries,
+				  bool use_scratch)
+{
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
+		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+				      I915_CACHE_LLC,
+				      use_scratch);
+	for (i = 0; i < num_entries; i++)
+		gen8_set_pte(&gtt_base[i], scratch_pte);
+	readl(gtt_base);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  unsigned int first_entry,
 				  unsigned int num_entries,
@@ -638,7 +1003,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 	readl(gtt_base);
 }
 
-
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *st,
 				     unsigned int pg_start,
@@ -720,6 +1084,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 			*end -= 4096;
 	}
 }
+
 void i915_gem_setup_global_gtt(struct drm_device *dev,
 			       unsigned long start,
 			       unsigned long mappable_end,
@@ -817,7 +1182,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 
 		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
 		drm_mm_takedown(&dev_priv->gtt.base.mm);
-		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
+		if (INTEL_INFO(dev)->gen < 8)
+			gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
 	}
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
@@ -867,6 +1233,15 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 	return snb_gmch_ctl << 20;
 }
 
+static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
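+	/* The GGMS field encodes the GGTT size as a power of two, in MB. */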
+	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+	if (bdw_gmch_ctl)
+		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+	return bdw_gmch_ctl << 20;
+}
+
 static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
@@ -874,6 +1249,108 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
 	return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
+static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+{
+	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
+	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
+	return bdw_gmch_ctl << 25; /* 32 MB units */
+}
+
+static int ggtt_probe_common(struct drm_device *dev,
+			     size_t gtt_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	int ret;
+
+	/* For Modern GENs the PTEs and register space are split in the BAR */
+	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+		(pci_resource_len(dev->pdev, 0) / 2);
+
+	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+	if (!dev_priv->gtt.gsm) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		return -ENOMEM;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		/* iounmap will also get called at remove, but meh */
+		iounmap(dev_priv->gtt.gsm);
+	}
+
+	return ret;
+}
+
+/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases. */
+static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+#define GEN8_PPAT_UC		(0<<0)
+#define GEN8_PPAT_WC		(1<<0)
+#define GEN8_PPAT_WT		(2<<0)
+#define GEN8_PPAT_WB		(3<<0)
+#define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
+/* FIXME(BDW): Bspec is completely confused about cache control bits. */
+#define GEN8_PPAT_LLC		(1<<2)
+#define GEN8_PPAT_LLCELLC	(2<<2)
+#define GEN8_PPAT_LLCeLLC	(3<<2)
+#define GEN8_PPAT_AGE(x)	(x<<4)
+#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
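+	/* Each PPAT entry occupies one byte; eight entries pack into the
+	 * 64-bit value written out below.
+	 */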
+	uint64_t pat;
+
+	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
+	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
+	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+
+	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
+	 * write would work. */
+	I915_WRITE(GEN8_PRIVATE_PAT, pat);
+	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
+static int gen8_gmch_probe(struct drm_device *dev,
+			   size_t *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   unsigned long *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int gtt_size;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	/* TODO: We're not aware of mappable constraints on gen8 yet */
+	*mappable_base = pci_resource_start(dev->pdev, 2);
+	*mappable_end = pci_resource_len(dev->pdev, 2);
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+	*stolen = gen8_get_stolen_size(snb_gmch_ctl);
+
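+	/* gtt_size is the size of the PTE array itself; one PTE maps one
+	 * page, so the total GTT address space follows directly from it.
+	 */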
+	gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
+
+	gen8_setup_private_ppat(dev_priv);
+
+	ret = ggtt_probe_common(dev, gtt_size);
+
+	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
+	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+
+	return ret;
+}
+
 static int gen6_gmch_probe(struct drm_device *dev,
 			   size_t *gtt_total,
 			   size_t *stolen,
@@ -881,7 +1358,6 @@ static int gen6_gmch_probe(struct drm_device *dev,
 			   unsigned long *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	phys_addr_t gtt_bus_addr;
 	unsigned int gtt_size;
 	u16 snb_gmch_ctl;
 	int ret;
@@ -901,24 +1377,13 @@ static int gen6_gmch_probe(struct drm_device *dev,
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
 	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
-	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
-
-	/* For Modern GENs the PTEs and register space are split in the BAR */
-	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
-		(pci_resource_len(dev->pdev, 0) / 2);
 
-	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
-	if (!dev_priv->gtt.gsm) {
-		DRM_ERROR("Failed to map the gtt page table\n");
-		return -ENOMEM;
-	}
+	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
+	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
-	ret = setup_scratch_page(dev);
-	if (ret)
-		DRM_ERROR("Scratch setup failed\n");
+	ret = ggtt_probe_common(dev, gtt_size);
 
 	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
@@ -972,7 +1437,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen <= 5) {
 		gtt->gtt_probe = i915_gmch_probe;
 		gtt->base.cleanup = i915_gmch_remove;
-	} else {
+	} else if (INTEL_INFO(dev)->gen < 8) {
 		gtt->gtt_probe = gen6_gmch_probe;
 		gtt->base.cleanup = gen6_gmch_remove;
 		if (IS_HASWELL(dev) && dev_priv->ellc_size)
@@ -985,6 +1450,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 			gtt->base.pte_encode = ivb_pte_encode;
 		else
 			gtt->base.pte_encode = snb_pte_encode;
+	} else {
+		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
+		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
 	}
 
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_vma_create(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_out;

+ 4 - 4
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (obj->pin_count) {
+	if (obj->pin_count || obj->framebuffer_references) {
 		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		if (obj->bit_17 == NULL) {
-			obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
 					      sizeof(long), GFP_KERNEL);
 		}
 	} else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 	int i;
 
 	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
-					   sizeof(long), GFP_KERNEL);
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
 		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");

+ 40 - 6
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	}
 }
 
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+	switch (a) {
+	case HANGCHECK_IDLE:
+		return "idle";
+	case HANGCHECK_WAIT:
+		return "wait";
+	case HANGCHECK_ACTIVE:
+		return "active";
+	case HANGCHECK_KICK:
+		return "kick";
+	case HANGCHECK_HUNG:
+		return "hung";
+	}
+
+	return "unknown";
+}
+
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 				  struct drm_device *dev,
 				  struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
 	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
+	if (INTEL_INFO(dev)->gen >= 4)
+		err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
 	if (INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
 	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+	err_printf(m, "  hangcheck: %s [%d]\n",
+		   hangcheck_action_to_str(error->hangcheck_action[ring]),
+		   error->hangcheck_score[ring]);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	err_printf(m, "CCID: 0x%08x\n", error->ccid);
 	err_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
 
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
@@ -601,6 +624,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6:
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -703,6 +727,7 @@ static void i915_record_ring_state(struct drm_device *dev,
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
 		if (ring->id == RCS)
 			error->bbaddr = I915_READ64(BB_ADDR);
+		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 		error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +745,9 @@ static void i915_record_ring_state(struct drm_device *dev,
 
 	error->cpu_ring_head[ring->id] = ring->head;
 	error->cpu_ring_tail[ring->id] = ring->tail;
+
+	error->hangcheck_score[ring->id] = ring->hangcheck.score;
+	error->hangcheck_action[ring->id] = ring->hangcheck.action;
 }
 
 
@@ -769,7 +797,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
 		error->ring[i].num_requests = count;
 		error->ring[i].requests =
-			kmalloc(count*sizeof(struct drm_i915_error_request),
+			kcalloc(count, sizeof(*error->ring[i].requests),
 				GFP_ATOMIC);
 		if (error->ring[i].requests == NULL) {
 			error->ring[i].num_requests = 0;
@@ -811,7 +839,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
 	if (i) {
-		active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
 		if (active_bo)
 			pinned_bo = active_bo + error->active_bo_count[ndx];
 	}
@@ -885,8 +913,12 @@ void i915_capture_error_state(struct drm_device *dev)
 		return;
 	}
 
-	DRM_INFO("capturing error event; look for more information in "
-		 "/sys/class/drm/card%d/error\n", dev->primary->index);
+	DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+		 dev->primary->index);
+	DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+	DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+	DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+	DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 
 	kref_init(&error->ref);
 	error->eir = I915_READ(EIR);
@@ -988,6 +1020,7 @@ const char *i915_cache_level_str(int type)
 	case I915_CACHE_NONE: return " uncached";
 	case I915_CACHE_LLC: return " snooped or LLC";
 	case I915_CACHE_L3_LLC: return " L3+LLC";
+	case I915_CACHE_WT: return " WT";
 	default: return "";
 	}
 }
@@ -1012,6 +1045,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
 	default:
 		WARN_ONCE(1, "Unsupported platform\n");
 	case 7:
+	case 8:
 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);

+ 859 - 184
drivers/gpu/drm/i915/i915_irq.c

@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -269,6 +270,21 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 	}
 }
 
+static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	if (enable)
+		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
+	else
+		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+}
+
 /**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
@@ -381,6 +397,8 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
 	else if (IS_GEN7(dev))
 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+	else if (IS_GEN8(dev))
+		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -441,7 +459,7 @@ done:
 
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +476,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 }
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +504,10 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
 	if (INTEL_INFO(dev)->gen >= 4)
-		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+		i915_enable_pipestat(dev_priv, PIPE_A,
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -518,6 +537,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
 	}
 }
 
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	/* Gen2 doesn't have a hardware frame counter */
+	return 0;
+}
+
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -526,7 +551,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low;
+	u32 high1, high2, low, pixel, vbl_start;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +559,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 		return 0;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		const struct drm_display_mode *mode =
+			&intel_crtc->config.adjusted_mode;
+
+		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+	} else {
+		enum transcoder cpu_transcoder =
+			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		u32 htotal;
+
+		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+		vbl_start *= htotal;
+	}
+
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -544,13 +587,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	 */
 	do {
 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+		low   = I915_READ(low_frame);
 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
-	return (high1 << 8) | low;
+
+	/*
+	 * The frame counter increments at beginning of active.
+	 * Cook up a vblank counter by also checking the pixel
+	 * counter against vblank start.
+	 */
+	return ((high1 << 8) | low) + (pixel >= vbl_start);
 }
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -567,66 +617,163 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	return I915_READ(reg);
 }
 
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+
+static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
+{
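+	/* Pick the live (unmasked) interrupt status register for this
+	 * platform and test its vblank bit for the requested pipe.
+	 */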
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t status;
+	int reg;
+
+	if (IS_VALLEYVIEW(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = VLV_ISR;
+	} else if (IS_GEN2(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 5) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 7) {
+		status = pipe == PIPE_A ?
+			DE_PIPEA_VBLANK :
+			DE_PIPEB_VBLANK;
+
+		reg = DEISR;
+	} else {
+		switch (pipe) {
+		default:
+		case PIPE_A:
+			status = DE_PIPEA_VBLANK_IVB;
+			break;
+		case PIPE_B:
+			status = DE_PIPEB_VBLANK_IVB;
+			break;
+		case PIPE_C:
+			status = DE_PIPEC_VBLANK_IVB;
+			break;
+		}
+
+		reg = DEISR;
+	}
+
+	if (IS_GEN2(dev))
+		return __raw_i915_read16(dev_priv, reg) & status;
+	else
+		return __raw_i915_read32(dev_priv, reg) & status;
+}
+
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-			     int *vpos, int *hpos)
+			     int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 vbl = 0, position = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	int position;
 	int vbl_start, vbl_end, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
+	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe)) {
+	if (!intel_crtc->active) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 		return 0;
 	}
 	}
 
 
-	/* Get vtotal. */
-	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+	htotal = mode->crtc_htotal;
+	vtotal = mode->crtc_vtotal;
+	vbl_start = mode->crtc_vblank_start;
+	vbl_end = mode->crtc_vblank_end;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/*
+	 * Lock uncore.lock, as we will do multiple timing critical raw
+	 * register reads, potentially with preemption disabled, so the
+	 * following code must not block on uncore.lock.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
-		position = I915_READ(PIPEDSL(pipe));
+		if (IS_GEN2(dev))
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		else
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-		/* Decode into vertical scanout position. Don't have
-		 * horizontal scanout position.
+		/*
+		 * The scanline counter increments at the leading edge
+		 * of hsync, ie. it completely misses the active portion
+		 * of the line. Fix up the counter at both edges of vblank
+		 * to get a more accurate picture whether we're in vblank
+		 * or not.
 		 */
 		 */
-		*hpos = 0;
+		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
+		if ((in_vbl && position == vbl_start - 1) ||
+		    (!in_vbl && position == vbl_end - 1))
+			position = (position + 1) % vtotal;
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
 		 * scanout position.
 		 */
-		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
-		*vpos = position / htotal;
-		*hpos = position - (*vpos * htotal);
+		/* convert to pixel counts */
+		vbl_start *= htotal;
+		vbl_end *= htotal;
+		vtotal *= htotal;
 	}
 
-	/* Query vblank area. */
-	vbl = I915_READ(VBLANK(cpu_transcoder));
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
 
-	/* Test position against vblank region. */
-	vbl_start = vbl & 0x1fff;
-	vbl_end = (vbl >> 16) & 0x1fff;
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
 
-	if ((*vpos < vbl_start) || (*vpos > vbl_end))
-		in_vbl = false;
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 
-	/* Inside "upper part" of vblank area? Apply corrective offset: */
-	if (in_vbl && (*vpos >= vbl_start))
-		*vpos = *vpos - vtotal;
+	in_vbl = position >= vbl_start && position < vbl_end;
 
-	/* Readouts valid? */
-	if (vbl > 0)
-		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+	/*
+	 * While in vblank, position will be negative
+	 * counting up towards 0 at vbl_end. And outside
+	 * vblank, position will be positive counting
+	 * up since vbl_end.
+	 */
+	if (position >= vbl_start)
+		position -= vbl_end;
+	else
+		position += vtotal - vbl_end;
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		*vpos = position;
+		*hpos = 0;
+	} else {
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
 
 	/* In vblank? */
 	if (in_vbl)
@@ -665,7 +812,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 						     crtc);
 }
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
 {
 	enum drm_connector_status old_status;
 
@@ -673,11 +821,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
 	old_status = connector->status;
 
 	connector->status = connector->funcs->detect(connector, false);
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 		      connector->base.id,
 		      drm_get_connector_name(connector),
-		      old_status, connector->status);
-	return (old_status != connector->status);
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
 }
 
 /*
@@ -801,7 +954,7 @@ static void notify_ring(struct drm_device *dev,
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+	trace_i915_gem_request_complete(ring);
 
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
@@ -812,7 +965,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps.work);
 	u32 pm_iir;
-	u8 new_delay;
+	int new_delay, adj;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +982,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
+	adj = dev_priv->rps.last_adj;
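+	/* Grow the step while consecutive interrupts keep pushing in the
+	 * same direction; a direction change resets the step to +/-1.
+	 */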
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		new_delay = dev_priv->rps.cur_delay + 1;
+		if (adj > 0)
+			adj *= 2;
+		else
+			adj = 1;
+		new_delay = dev_priv->rps.cur_delay + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (IS_VALLEYVIEW(dev_priv->dev) &&
-		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+		if (new_delay < dev_priv->rps.rpe_delay)
+			new_delay = dev_priv->rps.rpe_delay;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
 			new_delay = dev_priv->rps.rpe_delay;
-	} else
-		new_delay = dev_priv->rps.cur_delay - 1;
+		else
+			new_delay = dev_priv->rps.min_delay;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else
+			adj = -1;
+		new_delay = dev_priv->rps.cur_delay + adj;
+	} else { /* unknown event */
+		new_delay = dev_priv->rps.cur_delay;
+	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
-	if (new_delay >= dev_priv->rps.min_delay &&
-	    new_delay <= dev_priv->rps.max_delay) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, new_delay);
-		else
-			gen6_set_rps(dev_priv->dev, new_delay);
-	}
-
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		/*
-		 * On VLV, when we enter RC6 we may not be at the minimum
-		 * voltage level, so arm a timer to check.  It should only
-		 * fire when there's activity or once after we've entered
-		 * RC6, and then won't be re-armed until the next RPS interrupt.
-		 */
-		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-				 msecs_to_jiffies(100));
-	}
+	if (new_delay < (int)dev_priv->rps.min_delay)
+		new_delay = dev_priv->rps.min_delay;
+	if (new_delay > (int)dev_priv->rps.max_delay)
+		new_delay = dev_priv->rps.max_delay;
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		valleyview_set_rps(dev_priv->dev, new_delay);
+	else
+		gen6_set_rps(dev_priv->dev, new_delay);
 
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 }
@@ -882,9 +1044,10 @@ static void ivybridge_parity_work(struct work_struct *work)
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    l3_parity.error_work);
 						    l3_parity.error_work);
 	u32 error_status, row, bank, subbank;
 	u32 error_status, row, bank, subbank;
-	char *parity_event[5];
+	char *parity_event[6];
 	uint32_t misccpctl;
 	uint32_t misccpctl;
 	unsigned long flags;
 	unsigned long flags;
+	uint8_t slice = 0;
 
 
 	/* We must turn off DOP level clock gating to access the L3 registers.
 	/* We must turn off DOP level clock gating to access the L3 registers.
 	 * In order to prevent a get/put style interface, acquire struct mutex
 	 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1055,81 @@ static void ivybridge_parity_work(struct work_struct *work)
 	 */
 	 */
 	mutex_lock(&dev_priv->dev->struct_mutex);
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	POSTING_READ(GEN7_MISCCPCTL);
 
-	row = GEN7_PARITY_ERROR_ROW(error_status);
-	bank = GEN7_PARITY_ERROR_BANK(error_status);
-	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
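+		/* ffs() returns a 1-based bit index; it is converted to a
+		 * 0-based slice number below.
+		 */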
+		u32 reg;
 
-	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-				    GEN7_L3CDERRST1_ENABLE);
-	POSTING_READ(GEN7_L3CDERRST1);
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
 
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
 
 
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+		error_status = I915_READ(reg);
+		row = GEN7_PARITY_ERROR_ROW(error_status);
+		bank = GEN7_PARITY_ERROR_BANK(error_status);
+		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 
 
-	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
-	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-	parity_event[4] = NULL;
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+		parity_event[5] = NULL;
+
+		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+				   KOBJ_CHANGE, parity_event);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+		kfree(parity_event[4]);
+		kfree(parity_event[3]);
+		kfree(parity_event[2]);
+		kfree(parity_event[1]);
+	}
 
 
-	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-			   KOBJ_CHANGE, parity_event);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 
-	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-		  row, bank, subbank);
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 
-	kfree(parity_event[3]);
-	kfree(parity_event[2]);
-	kfree(parity_event[1]);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 }
 
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	if (!HAS_L3_DPF(dev))
 		return;
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
+	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
 	spin_unlock(&dev_priv->irq_lock);
 	spin_unlock(&dev_priv->irq_lock);
 
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+		dev_priv->l3_parity.which_slice |= 1 << 1;
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		dev_priv->l3_parity.which_slice |= 1 << 0;
+
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
@@ -975,8 +1164,58 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_parity_error_irq_handler(dev);
+	if (gt_iir & GT_PARITY_ERROR(dev))
+		ivybridge_parity_error_irq_handler(dev, gt_iir);
+}
+
+static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
+				       struct drm_i915_private *dev_priv,
+				       u32 master_ctl)
+{
+	u32 rcs, bcs, vcs;
+	uint32_t tmp = 0;
+	irqreturn_t ret = IRQ_NONE;
+
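+	/* GEN8 packs two engines' interrupt bits into each GT IIR; the
+	 * per-engine shift selects which half to inspect.
+	 */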
+	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+		tmp = I915_READ(GEN8_GT_IIR(0));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			if (rcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[RCS]);
+			if (bcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[BCS]);
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT0)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VCS1_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(1));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VCS]);
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT1)!\n");
+	}
+
+	if (master_ctl & GEN8_GT_VECS_IRQ) {
+		tmp = I915_READ(GEN8_GT_IIR(3));
+		if (tmp) {
+			ret = IRQ_HANDLED;
+			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			if (vcs & GT_RENDER_USER_INTERRUPT)
+				notify_ring(dev, &dev_priv->ring[VECS]);
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
+		} else
+			DRM_ERROR("The master control interrupt lied (GT3)!\n");
+	}
+
+	return ret;
 }
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1289,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+					 uint32_t crc0, uint32_t crc1,
+					 uint32_t crc2, uint32_t crc3,
+					 uint32_t crc4)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_pipe_crc_entry *entry;
+	int head, tail;
+
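+	/* CRC samples land in a power-of-two circular buffer; a sample is
+	 * dropped when the reader has fallen a full buffer behind.
+	 */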
+	spin_lock(&pipe_crc->lock);
+
+	if (!pipe_crc->entries) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("spurious interrupt\n");
+		return;
+	}
+
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+
+	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("CRC buffer overflowing\n");
+		return;
+	}
+
+	entry = &pipe_crc->entries[head];
+
+	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->crc[0] = crc0;
+	entry->crc[1] = crc1;
+	entry->crc[2] = crc2;
+	entry->crc[3] = crc3;
+	entry->crc[4] = crc4;
+
+	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+	pipe_crc->head = head;
+
+	spin_unlock(&pipe_crc->lock);
+
+	wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+			     uint32_t crc0, uint32_t crc1,
+			     uint32_t crc2, uint32_t crc3,
+			     uint32_t crc4) {}
+#endif
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t res1, res2;
+
+	if (INTEL_INFO(dev)->gen >= 3)
+		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+	else
+		res1 = 0;
+
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+	else
+		res2 = 0;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_RED(pipe)),
+				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+				     res1, res2);
+}
+
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
@@ -1117,13 +1452,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		for_each_pipe(pipe) {
-			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 				drm_handle_vblank(dev, pipe);
 
 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 				intel_prepare_page_flip(dev, pipe);
 				intel_finish_page_flip(dev, pipe);
 			}
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 		/* Consume port.  Then clear IIR or we'll miss events */
@@ -1212,21 +1550,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 err_int = I915_READ(GEN7_ERR_INT);
+	enum pipe pipe;
 
 	if (err_int & ERR_INT_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
-	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
-			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+			if (IS_IVYBRIDGE(dev))
+				ivb_pipe_crc_irq_handler(dev, pipe);
+			else
+				hsw_pipe_crc_irq_handler(dev, pipe);
+		}
+	}
 
 	I915_WRITE(GEN7_ERR_INT, err_int);
 }
@@ -1297,6 +1640,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
 
 	if (de_iir & DE_AUX_CHANNEL_A)
 		dp_aux_irq_handler(dev);
@@ -1304,31 +1648,26 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 	if (de_iir & DE_GSE)
 		intel_opregion_asle_intr(dev);
 
-	if (de_iir & DE_PIPEA_VBLANK)
-		drm_handle_vblank(dev, 0);
-
-	if (de_iir & DE_PIPEB_VBLANK)
-		drm_handle_vblank(dev, 1);
-
 	if (de_iir & DE_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (de_iir & DE_PIPE_VBLANK(pipe))
+			drm_handle_vblank(dev, pipe);
 
-	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
 
-	if (de_iir & DE_PLANEA_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 0);
-		intel_finish_page_flip_plane(dev, 0);
-	}
+		if (de_iir & DE_PIPE_CRC_DONE(pipe))
+			i9xx_pipe_crc_irq_handler(dev, pipe);
 
-	if (de_iir & DE_PLANEB_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 1);
-		intel_finish_page_flip_plane(dev, 1);
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
 	}
 
 	/* check event from PCH */
@@ -1351,7 +1690,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	enum pipe i;
 
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev);
@@ -1362,10 +1701,12 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 	if (de_iir & DE_GSE_IVB)
 		intel_opregion_asle_intr(dev);
 
-	for (i = 0; i < 3; i++) {
-		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+	for_each_pipe(i) {
+		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
 			drm_handle_vblank(dev, i);
-		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
 			intel_prepare_page_flip(dev, i);
 			intel_finish_page_flip_plane(dev, i);
 		}
@@ -1388,7 +1729,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
-	bool err_int_reenable = false;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -1412,17 +1752,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 		POSTING_READ(SDEIER);
 	}
 
-	/* On Haswell, also mask ERR_INT because we don't want to risk
-	 * generating "unclaimed register" interrupts from inside the interrupt
-	 * handler. */
-	if (IS_HASWELL(dev)) {
-		spin_lock(&dev_priv->irq_lock);
-		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-		if (err_int_reenable)
-			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1781,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 		}
 	}
 
-	if (err_int_reenable) {
-		spin_lock(&dev_priv->irq_lock);
-		if (ivb_can_enable_err_int(dev))
-			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
 	if (!HAS_PCH_NOP(dev)) {
@@ -1469,6 +1791,117 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	return ret;
 }
 
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 master_ctl;
+	irqreturn_t ret = IRQ_NONE;
+	uint32_t tmp = 0;
+	enum pipe pipe;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	master_ctl = I915_READ(GEN8_MASTER_IRQ);
+	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+	if (!master_ctl)
+		return IRQ_NONE;
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+	if (master_ctl & GEN8_DE_MISC_IRQ) {
+		tmp = I915_READ(GEN8_DE_MISC_IIR);
+		if (tmp & GEN8_DE_MISC_GSE)
+			intel_opregion_asle_intr(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Misc interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (master_ctl & GEN8_DE_PORT_IRQ) {
+		tmp = I915_READ(GEN8_DE_PORT_IIR);
+		if (tmp & GEN8_AUX_CHANNEL_A)
+			dp_aux_irq_handler(dev);
+		else if (tmp)
+			DRM_ERROR("Unexpected DE Port interrupt\n");
+		else
+			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+
+		if (tmp) {
+			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	for_each_pipe(pipe) {
+		uint32_t pipe_iir;
+
+		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
+			continue;
+
+		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (pipe_iir & GEN8_PIPE_VBLANK)
+			drm_handle_vblank(dev, pipe);
+
+		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
+
+		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+			hsw_pipe_crc_irq_handler(dev, pipe);
+
+		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
+
+		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+		DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
+				  pipe_name(pipe),
+				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+		}
+
+		if (pipe_iir) {
+			ret = IRQ_HANDLED;
+			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+		} else
+			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+	}
+
+	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+		/*
+		 * FIXME(BDW): Assume for now that the new interrupt handling
+		 * scheme also closed the SDE interrupt handling race we've seen
+		 * on older pch-split platforms. But this needs testing.
+		 */
+		u32 pch_iir = I915_READ(SDEIIR);
+
+		cpt_irq_handler(dev, pch_iir);
+
+		if (pch_iir) {
+			I915_WRITE(SDEIIR, pch_iir);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return ret;
+}
+
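Note: gen8_irq_handler() follows the usual master-control discipline: clear the top-level enable so nothing new fires, ack each sub-unit IIR, then re-arm. A compressed sketch of that shape, with stand-in register accessors rather than I915_READ/I915_WRITE:

	#define MASTER_IRQ_CONTROL (1u << 31)	/* assumption: top bit is the global enable */

	static unsigned int regs[4];
	static unsigned int read_reg(int r) { return regs[r]; }
	static void write_reg(int r, unsigned int v) { regs[r] = v; }

	static int handle_master(int master_reg)
	{
		unsigned int master = read_reg(master_reg) & ~MASTER_IRQ_CONTROL;

		if (!master)
			return 0;			/* not our interrupt */

		write_reg(master_reg, 0);		/* gate new events while acking IIRs */
		/* ...read each sub-unit IIR, handle it, write it back to clear it... */
		write_reg(master_reg, MASTER_IRQ_CONTROL);	/* re-arm */
		return 1;
	}
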
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
@@ -1516,7 +1949,7 @@ static void i915_error_work_func(struct work_struct *work)
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
 
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1963,7 @@ static void i915_error_work_func(struct work_struct *work)
 	 */
 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 				   reset_event);
 
 		/*
@@ -1557,7 +1990,7 @@ static void i915_error_work_func(struct work_struct *work)
 			smp_mb__before_atomic_inc();
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-			kobject_uevent_env(&dev->primary->kdev.kobj,
+			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
 			atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2220,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
@@ -1810,7 +2243,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1822,6 +2255,22 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
 	return 0;
 }
 
+static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	return 0;
+}
+
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
@@ -1845,7 +2294,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2311,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 	i915_disable_pipestat(dev_priv, pipe,
 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1870,6 +2319,21 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
+static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
+	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static u32
 ring_last_seqno(struct intel_ring_buffer *ring)
 {
@@ -1965,6 +2429,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
 	if (tmp & RING_WAIT) {
 		DRM_ERROR("Kicking stuck wait on %s\n",
 			  ring->name);
+		i915_handle_error(dev, false);
 		I915_WRITE_CTL(ring, tmp);
 		return HANGCHECK_KICK;
 	}
@@ -1976,6 +2441,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
 		case 1:
 			DRM_ERROR("Kicking stuck semaphore on %s\n",
 				  ring->name);
+			i915_handle_error(dev, false);
 			I915_WRITE_CTL(ring, tmp);
 			return HANGCHECK_KICK;
 		case 0:
@@ -2021,12 +2487,21 @@ static void i915_hangcheck_elapsed(unsigned long data)
 
 		if (ring->hangcheck.seqno == seqno) {
 			if (ring_idle(ring, seqno)) {
+				ring->hangcheck.action = HANGCHECK_IDLE;
+
 				if (waitqueue_active(&ring->irq_queue)) {
 				if (waitqueue_active(&ring->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
-						  ring->name);
-					wake_up_all(&ring->irq_queue);
-					ring->hangcheck.score += HUNG;
+					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+								  ring->name);
+						else
+							DRM_INFO("Fake missed irq on %s\n",
+								 ring->name);
+						wake_up_all(&ring->irq_queue);
+					}
+					/* Safeguard against driver failure */
+					ring->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -2049,6 +2524,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 								    acthd);
 
 				switch (ring->hangcheck.action) {
+				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 					break;
 				case HANGCHECK_ACTIVE:
@@ -2064,6 +2540,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
 				}
 			}
 		} else {
+			ring->hangcheck.action = HANGCHECK_ACTIVE;
+
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
@@ -2190,6 +2668,53 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(VLV_IER);
 }
 
+static void gen8_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	/* IIR can theoretically queue up two events. Be paranoid */
+#define GEN8_IRQ_INIT_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR(which)); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR(which)); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_INIT(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IMR); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+		POSTING_READ(GEN8_##type##_IIR); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_INIT_NDX(GT, 0);
+	GEN8_IRQ_INIT_NDX(GT, 1);
+	GEN8_IRQ_INIT_NDX(GT, 2);
+	GEN8_IRQ_INIT_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_INIT(DE_PORT);
+	GEN8_IRQ_INIT(DE_MISC);
+	GEN8_IRQ_INIT(PCU);
+#undef GEN8_IRQ_INIT
+#undef GEN8_IRQ_INIT_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
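Note: thanks to the ## token pasting, GEN8_IRQ_INIT_NDX(GT, 0) expands to the sequence below (reconstructed from the macro body; the IIR write is issued twice because the register can queue up two events):

	I915_WRITE(GEN8_GT_IMR(0), 0xffffffff);	/* mask everything */
	POSTING_READ(GEN8_GT_IMR(0));
	I915_WRITE(GEN8_GT_IER(0), 0);		/* enable nothing */
	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);	/* ack any latched event */
	POSTING_READ(GEN8_GT_IIR(0));
	I915_WRITE(GEN8_GT_IIR(0), 0xffffffff);	/* again, for a queued second event */
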
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2254,10 +2779,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 	pm_irqs = gt_irqs = 0;
 
 	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_GPU_CACHE(dev)) {
+	if (HAS_L3_DPF(dev)) {
 		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+		gt_irqs |= GT_PARITY_ERROR(dev);
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2831,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+				DE_AUX_CHANNEL_A |
+				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+				DE_POISON);
 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
 	}
 
@@ -2341,7 +2868,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
-	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+		PIPE_CRC_DONE_ENABLE;
 	unsigned long irqflags;
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2899,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
-	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2392,6 +2920,117 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
+static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	/* These are interrupts we'll toggle with the ring mask register */
+	uint32_t gt_interrupts[] = {
+		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+		0,
+		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+		};
+
+	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
+		u32 tmp = I915_READ(GEN8_GT_IIR(i));
+		if (tmp)
+			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+				  i, tmp);
+		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
+		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
+	}
+	POSTING_READ(GEN8_GT_IER(0));
+}
+
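Note: each gt_interrupts[] entry programs one GT IMR/IER bank, packing two engines per 32-bit register at their IRQ shifts. A sketch of how the handler side decodes bank 0 under that layout (gen8_gt_irq_handler() in this series is the authoritative version):

	u32 iir = I915_READ(GEN8_GT_IIR(0));

	if (iir) {
		if (iir & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
			notify_ring(dev, &dev_priv->ring[RCS]);	/* render */
		if (iir & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
			notify_ring(dev, &dev_priv->ring[BCS]);	/* blitter */
		I915_WRITE(GEN8_GT_IIR(0), iir);	/* write back to clear */
	}
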
+static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
+		GEN8_PIPE_CDCLK_CRC_DONE |
+		GEN8_PIPE_FIFO_UNDERRUN |
+		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+	int pipe;
+	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
+	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
+	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+
+	for_each_pipe(pipe) {
+		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+		if (tmp)
+			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
+				  pipe, tmp);
+		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
+		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
+	}
+	POSTING_READ(GEN8_DE_PIPE_ISR(0));
+
+	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
+	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
+	POSTING_READ(GEN8_DE_PORT_IER);
+}
+
+static int gen8_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen8_gt_irq_postinstall(dev_priv);
+	gen8_de_irq_postinstall(dev_priv);
+
+	ibx_irq_postinstall(dev);
+
+	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+	POSTING_READ(GEN8_MASTER_IRQ);
+
+	return 0;
+}
+
+static void gen8_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+#define GEN8_IRQ_FINI_NDX(type, which) do { \
+		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+		I915_WRITE(GEN8_##type##_IER(which), 0); \
+		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+	} while (0)
+
+#define GEN8_IRQ_FINI(type) do { \
+		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+		I915_WRITE(GEN8_##type##_IER, 0); \
+		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+	} while (0)
+
+	GEN8_IRQ_FINI_NDX(GT, 0);
+	GEN8_IRQ_FINI_NDX(GT, 1);
+	GEN8_IRQ_FINI_NDX(GT, 2);
+	GEN8_IRQ_FINI_NDX(GT, 3);
+
+	for_each_pipe(pipe) {
+		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
+	}
+
+	GEN8_IRQ_FINI(DE_PORT);
+	GEN8_IRQ_FINI(DE_MISC);
+	GEN8_IRQ_FINI(PCU);
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+	POSTING_READ(GEN8_PCU_IIR);
+}
+
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2464,6 +3103,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
 
 	I915_WRITE16(EMR,
 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +3124,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 		     I915_USER_INTERRUPT);
 	POSTING_READ16(IER);
 
+	/* Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2570,13 +3217,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		if (iir & I915_USER_INTERRUPT)
 			notify_ring(dev, &dev_priv->ring[RCS]);
 
-		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, 0, iir))
-			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    i8xx_handle_vblank(dev, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
-		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, 1, iir))
-			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
+		}
 
 		iir = new_iir;
 	}
@@ -2623,6 +3271,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
+	unsigned long irqflags;
 
 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -2658,6 +3307,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
 
 	i915_enable_asle_pipestat(dev);
 
+	/* Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2769,6 +3425,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3526,9 @@ static int i965_irq_postinstall(struct drm_device *dev)
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	/*
@@ -3013,6 +3674,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 
@@ -3122,18 +3786,21 @@ void intel_irq_init(struct drm_device *dev)
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-	dev->driver->get_vblank_counter = i915_get_vblank_counter;
-	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev)) {
+		dev->max_vblank_count = 0;
+		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	} else {
+		dev->driver->get_vblank_counter = i915_get_vblank_counter;
+		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-	else
-		dev->driver->get_vblank_timestamp = NULL;
-	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+	}
 
 	if (IS_VALLEYVIEW(dev)) {
 		dev->driver->irq_handler = valleyview_irq_handler;
@@ -3143,6 +3810,14 @@ void intel_irq_init(struct drm_device *dev)
 		dev->driver->enable_vblank = valleyview_enable_vblank;
 		dev->driver->disable_vblank = valleyview_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (IS_GEN8(dev)) {
+		dev->driver->irq_handler = gen8_irq_handler;
+		dev->driver->irq_preinstall = gen8_irq_preinstall;
+		dev->driver->irq_postinstall = gen8_irq_postinstall;
+		dev->driver->irq_uninstall = gen8_irq_uninstall;
+		dev->driver->enable_vblank = gen8_enable_vblank;
+		dev->driver->disable_vblank = gen8_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev->driver->irq_handler = ironlake_irq_handler;
 		dev->driver->irq_preinstall = ironlake_irq_preinstall;

+ 783 - 44	drivers/gpu/drm/i915/i915_reg.h

@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -109,6 +110,9 @@
 #define RING_PP_DIR_DCLV(ring)		((ring)->mmio_base+0x220)
 #define   PP_DIR_DCLV_2G		0xffffffff
 
+#define GEN8_RING_PDP_UDW(ring, n)	((ring)->mmio_base+0x270 + ((n) * 8 + 4))
+#define GEN8_RING_PDP_LDW(ring, n)	((ring)->mmio_base+0x270 + (n) * 8)
+
 #define GAM_ECOCHK			0x4090
 #define   ECOCHK_SNB_BIT		(1<<10)
 #define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
@@ -246,6 +250,7 @@
 #define   MI_BATCH_NON_SECURE_HSW 	(1<<13)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
 #define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
 #define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define  MI_SEMAPHORE_UPDATE	    (1<<21)
@@ -264,6 +269,11 @@
 #define  MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
 #define  MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
 #define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
+
+#define MI_PREDICATE_RESULT_2	(0x2214)
+#define  LOWER_SLICE_ENABLED	(1<<0)
+#define  LOWER_SLICE_DISABLED	(0<<0)
+
 /*
  * 3D instructions used by the kernel
  */
@@ -346,12 +356,25 @@
 #define   IOSF_PORT_PUNIT			0x4
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_DPIO			0x12
+#define   IOSF_PORT_GPIO_NC			0x13
+#define   IOSF_PORT_CCK				0x14
+#define   IOSF_PORT_CCU				0xA9
+#define   IOSF_PORT_GPS_CORE			0x48
 #define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
 
 #define PUNIT_OPCODE_REG_READ			6
 #define PUNIT_OPCODE_REG_WRITE			7
 
+#define PUNIT_REG_PWRGT_CTRL			0x60
+#define PUNIT_REG_PWRGT_STATUS			0x61
+#define	  PUNIT_CLK_GATE			1
+#define	  PUNIT_PWR_RESET			2
+#define	  PUNIT_PWR_GATE			3
+#define	  RENDER_PWRGT				(PUNIT_PWR_GATE << 0)
+#define	  MEDIA_PWRGT				(PUNIT_PWR_GATE << 2)
+#define	  DISP2D_PWRGT				(PUNIT_PWR_GATE << 6)
+
 #define PUNIT_REG_GPU_LFM			0xd3
 #define PUNIT_REG_GPU_FREQ_REQ			0xd4
 #define PUNIT_REG_GPU_FREQ_STS			0xd8
@@ -372,6 +395,40 @@
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
 
+/* vlv2 north clock (CCK) registers */
+#define CCK_FUSE_REG				0x8
+#define  CCK_FUSE_HPLL_FREQ_MASK		0x3
+#define CCK_REG_DSI_PLL_FUSE			0x44
+#define CCK_REG_DSI_PLL_CONTROL			0x48
+#define  DSI_PLL_VCO_EN				(1 << 31)
+#define  DSI_PLL_LDO_GATE			(1 << 30)
+#define  DSI_PLL_P1_POST_DIV_SHIFT		17
+#define  DSI_PLL_P1_POST_DIV_MASK		(0x1ff << 17)
+#define  DSI_PLL_P2_MUX_DSI0_DIV2		(1 << 13)
+#define  DSI_PLL_P3_MUX_DSI1_DIV2		(1 << 12)
+#define  DSI_PLL_MUX_MASK			(3 << 9)
+#define  DSI_PLL_MUX_DSI0_DSIPLL		(0 << 10)
+#define  DSI_PLL_MUX_DSI0_CCK			(1 << 10)
+#define  DSI_PLL_MUX_DSI1_DSIPLL		(0 << 9)
+#define  DSI_PLL_MUX_DSI1_CCK			(1 << 9)
+#define  DSI_PLL_CLK_GATE_MASK			(0xf << 5)
+#define  DSI_PLL_CLK_GATE_DSI0_DSIPLL		(1 << 8)
+#define  DSI_PLL_CLK_GATE_DSI1_DSIPLL		(1 << 7)
+#define  DSI_PLL_CLK_GATE_DSI0_CCK		(1 << 6)
+#define  DSI_PLL_CLK_GATE_DSI1_CCK		(1 << 5)
+#define  DSI_PLL_LOCK				(1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER			0x4c
+#define  DSI_PLL_LFSR				(1 << 31)
+#define  DSI_PLL_FRACTION_EN			(1 << 30)
+#define  DSI_PLL_FRAC_COUNTER_SHIFT		27
+#define  DSI_PLL_FRAC_COUNTER_MASK		(7 << 27)
+#define  DSI_PLL_USYNC_CNT_SHIFT		18
+#define  DSI_PLL_USYNC_CNT_MASK			(0x1ff << 18)
+#define  DSI_PLL_N1_DIV_SHIFT			16
+#define  DSI_PLL_N1_DIV_MASK			(3 << 16)
+#define  DSI_PLL_M1_DIV_SHIFT			0
+#define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
+
 /*
  * DPIO - a special bus for various display related registers to hide behind
  *
@@ -387,11 +444,11 @@
 #define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
 #define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
 #define  DPIO_SFR_BYPASS		(1<<1)
-#define  DPIO_RESET			(1<<0)
+#define  DPIO_CMNRST			(1<<0)
 
 #define _DPIO_TX3_SWING_CTL4_A		0x690
 #define _DPIO_TX3_SWING_CTL4_B		0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
 					_DPIO_TX3_SWING_CTL4_B)
 
 /*
@@ -602,6 +659,9 @@
 #define ARB_MODE		0x04030
 #define   ARB_MODE_SWIZZLE_SNB	(1<<4)
 #define   ARB_MODE_SWIZZLE_IVB	(1<<5)
+#define GAMTARBMODE		0x04a08
+#define   ARB_MODE_BWGTLB_DISABLE (1<<9)
+#define   ARB_MODE_SWIZZLE_BDW	(1<<1)
 #define RENDER_HWS_PGA_GEN7	(0x04080)
 #define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
 #define   RING_FAULT_GTTSEL_MASK (1<<11)
@@ -609,6 +669,7 @@
 #define   RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
 #define   RING_FAULT_VALID	(1<<0)
 #define DONE_REG		0x40b0
+#define GEN8_PRIVATE_PAT	0x40e0
 #define BSD_HWS_PGA_GEN7	(0x04180)
 #define BLT_HWS_PGA_GEN7	(0x04280)
 #define VEBOX_HWS_PGA_GEN7	(0x04380)
@@ -669,13 +730,18 @@
 #define NOPID		0x02094
 #define HWSTAM		0x02098
 #define DMA_FADD_I8XX	0x020d0
+#define RING_BBSTATE(base)	((base)+0x110)
 
 #define ERROR_GEN6	0x040a0
 #define GEN7_ERR_INT	0x44040
 #define   ERR_INT_POISON		(1<<31)
 #define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
+#define   ERR_INT_PIPE_CRC_DONE_C	(1<<8)
 #define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
+#define   ERR_INT_PIPE_CRC_DONE_B	(1<<5)
 #define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
+#define   ERR_INT_PIPE_CRC_DONE_A	(1<<2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)	(1<<(2 + pipe*3))
 #define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
 #define   ERR_INT_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))
 
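Note: the per-pipe forms stride three bits per pipe, so with pipes numbered A=0, B=1, C=2 they line up with the fixed defines above; hypothetical compile-time checks would read:

	BUILD_BUG_ON(ERR_INT_FIFO_UNDERRUN(0) != ERR_INT_FIFO_UNDERRUN_A);	/* bit 0 */
	BUILD_BUG_ON(ERR_INT_FIFO_UNDERRUN(1) != ERR_INT_FIFO_UNDERRUN_B);	/* bit 3 */
	BUILD_BUG_ON(ERR_INT_FIFO_UNDERRUN(2) != ERR_INT_FIFO_UNDERRUN_C);	/* bit 6 */
	BUILD_BUG_ON(ERR_INT_PIPE_CRC_DONE(0) != ERR_INT_PIPE_CRC_DONE_A);	/* bit 2 */
	BUILD_BUG_ON(ERR_INT_PIPE_CRC_DONE(1) != ERR_INT_PIPE_CRC_DONE_B);	/* bit 5 */
	BUILD_BUG_ON(ERR_INT_PIPE_CRC_DONE(2) != ERR_INT_PIPE_CRC_DONE_C);	/* bit 8 */
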
@@ -683,6 +749,7 @@
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
 
 #define DERRMR		0x44050
+/* Note that HBLANK events are reserved on bdw+ */
 #define   DERRMR_PIPEA_SCANLINE		(1<<0)
 #define   DERRMR_PIPEA_PRI_FLIP_DONE	(1<<1)
 #define   DERRMR_PIPEA_SPR_FLIP_DONE	(1<<2)
@@ -716,6 +783,7 @@
 #define _3D_CHICKEN3	0x02090
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
+#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)	((x)<<1)
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
@@ -890,6 +958,7 @@
 #define GT_BLT_USER_INTERRUPT			(1 << 22)
 #define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
 #define GT_BSD_USER_INTERRUPT			(1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
 #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT	(1 <<  3)
@@ -900,6 +969,10 @@
 #define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
 #define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
 
+#define GT_PARITY_ERROR(dev) \
+	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+	 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
 /* These are all the "old" interrupts */
 #define ILK_BSD_USER_INTERRUPT				(1<<5)
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
@@ -1048,9 +1121,6 @@
 					     _HSW_PIPE_SLICE_CHICKEN_1_A, + \
 					     _HSW_PIPE_SLICE_CHICKEN_1_B)
 
-#define HSW_CLKGATE_DISABLE_PART_1	0x46500
-#define   HSW_DPFC_GATING_DISABLE	(1<<23)
-
 /*
  * GPIO regs
  */
@@ -1387,6 +1457,12 @@
 
 #define MI_ARB_VLV		(VLV_DISPLAY_BASE + 0x6504)
 
+#define CZCLK_CDCLK_FREQ_RATIO	(VLV_DISPLAY_BASE + 0x6508)
+#define   CDCLK_FREQ_SHIFT	4
+#define   CDCLK_FREQ_MASK	(0x1f << CDCLK_FREQ_SHIFT)
+#define   CZCLK_FREQ_MASK	0xf
+#define GMBUSFREQ_VLV		(VLV_DISPLAY_BASE + 0x6510)
+
 /*
  * Palette regs
  */
@@ -1404,13 +1480,15 @@
  * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
  * every way.  It is not accessible from the CP register read instructions.
  *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
  */
 #define MCHBAR_MIRROR_BASE	0x10000
 
 #define MCHBAR_MIRROR_BASE_SNB	0x140000
 
 /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
-#define DCLK 0x5e04
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
 /** 915-945 and GM965 MCH register controlling DRAM channel access */
 #define DCC			0x10200
@@ -1705,9 +1783,9 @@
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
-#define GEN6_GT_PERF_STATUS	0x145948
-#define GEN6_RP_STATE_LIMITS	0x145994
-#define GEN6_RP_STATE_CAP	0x145998
+#define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 
 /*
  * Logical Context regs
@@ -1752,6 +1830,12 @@
  * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
+
+
+#define VLV_CLK_CTL2			0x101104
+#define   CLK_CTL2_CZCOUNT_30NS_SHIFT	28
 
 /*
  * Overlay regs
@@ -1771,6 +1855,83 @@
  * Display engine regs
  */
 
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A		(dev_priv->info->display_mmio_offset + 0x60050)
+#define   PIPE_CRC_ENABLE		(1 << 31)
+/* ivb+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_IVB	(0 << 29)
+#define   PIPE_CRC_SOURCE_SPRITE_IVB	(1 << 29)
+#define   PIPE_CRC_SOURCE_PF_IVB	(2 << 29)
+/* ilk+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_ILK	(0 << 28)
+#define   PIPE_CRC_SOURCE_SPRITE_ILK	(1 << 28)
+#define   PIPE_CRC_SOURCE_PIPE_ILK	(2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define   PIPE_CRC_SOURCE_PORT_A_ILK	(4 << 28)
+#define   PIPE_CRC_SOURCE_FDI_ILK	(5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define   PIPE_CRC_SOURCE_PIPE_VLV	(0 << 27)
+#define   PIPE_CRC_SOURCE_HDMIB_VLV	(1 << 27)
+#define   PIPE_CRC_SOURCE_HDMIC_VLV	(2 << 27)
+/* with DP port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_VLV	(3 << 27)
+#define   PIPE_CRC_SOURCE_DP_B_VLV	(6 << 27)
+#define   PIPE_CRC_SOURCE_DP_C_VLV	(7 << 27)
+/* gen3+ source selection */
+#define   PIPE_CRC_SOURCE_PIPE_I9XX	(0 << 28)
+#define   PIPE_CRC_SOURCE_SDVOB_I9XX	(1 << 28)
+#define   PIPE_CRC_SOURCE_SDVOC_I9XX	(2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_G4X	(3 << 28)
+#define   PIPE_CRC_SOURCE_TV_PRE	(4 << 28)
+#define   PIPE_CRC_SOURCE_TV_POST	(5 << 28)
+#define   PIPE_CRC_SOURCE_DP_B_G4X	(6 << 28)
+#define   PIPE_CRC_SOURCE_DP_C_G4X	(7 << 28)
+/* gen2 doesn't have source selection bits */
+#define   PIPE_CRC_INCLUDE_BORDER_I8XX	(1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB		0x60064
+#define _PIPE_CRC_RES_2_A_IVB		0x60068
+#define _PIPE_CRC_RES_3_A_IVB		0x6006c
+#define _PIPE_CRC_RES_4_A_IVB		0x60070
+#define _PIPE_CRC_RES_5_A_IVB		0x60074
+
+#define _PIPE_CRC_RES_RED_A		(dev_priv->info->display_mmio_offset + 0x60060)
+#define _PIPE_CRC_RES_GREEN_A		(dev_priv->info->display_mmio_offset + 0x60064)
+#define _PIPE_CRC_RES_BLUE_A		(dev_priv->info->display_mmio_offset + 0x60068)
+#define _PIPE_CRC_RES_RES1_A_I915	(dev_priv->info->display_mmio_offset + 0x6006c)
+#define _PIPE_CRC_RES_RES2_A_G4X	(dev_priv->info->display_mmio_offset + 0x60080)
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB		0x61064
+#define _PIPE_CRC_RES_2_B_IVB		0x61068
+#define _PIPE_CRC_RES_3_B_IVB		0x6106c
+#define _PIPE_CRC_RES_4_B_IVB		0x61070
+#define _PIPE_CRC_RES_5_B_IVB		0x61074
+
+#define PIPE_CRC_CTL(pipe)	_PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_RES_1_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+#define PIPE_CRC_RES_GREEN(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+#define PIPE_CRC_RES_BLUE(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+
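Note: the per-pipe lookups are plain strided arithmetic. A worked example with the macros above, taking pipe B (= 1) and writing base for dev_priv->info->display_mmio_offset:

	/* PIPE_CRC_CTL(1)       = _PIPE_INC(1, _PIPE_CRC_CTL_A, 0x01000)
	 *                       = base + 0x60050 + 1*0x01000 = base + 0x61050
	 * PIPE_CRC_RES_1_IVB(1) = _PIPE(1, 0x60064, 0x61064)
	 *                       = 0x60064 + 1*(0x61064 - 0x60064) = 0x61064 */
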
 /* Pipe A timing regs */
 #define _HTOTAL_A	(dev_priv->info->display_mmio_offset + 0x60000)
 #define _HBLANK_A	(dev_priv->info->display_mmio_offset + 0x60004)
@@ -1793,7 +1954,6 @@
 #define _BCLRPAT_B	(dev_priv->info->display_mmio_offset + 0x61020)
 #define _VSYNCSHIFT_B	(dev_priv->info->display_mmio_offset + 0x61028)
 
-
 #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
 #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
 #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1803,8 +1963,9 @@
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
-/* HSW eDP PSR registers */
-#define EDP_PSR_CTL				0x64800
+/* HSW+ eDP PSR registers */
+#define EDP_PSR_BASE(dev)                       (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
+#define EDP_PSR_CTL(dev)			(EDP_PSR_BASE(dev) + 0)
 #define   EDP_PSR_ENABLE			(1<<31)
 #define   EDP_PSR_LINK_DISABLE			(0<<27)
 #define   EDP_PSR_LINK_STANDBY			(1<<27)
@@ -1827,16 +1988,16 @@
 #define   EDP_PSR_TP1_TIME_0us			(3<<4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT		0
 
-#define EDP_PSR_AUX_CTL			0x64810
-#define EDP_PSR_AUX_DATA1		0x64814
+#define EDP_PSR_AUX_CTL(dev)			(EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev)			(EDP_PSR_BASE(dev) + 0x14)
 #define   EDP_PSR_DPCD_COMMAND		0x80060000
-#define EDP_PSR_AUX_DATA2		0x64818
+#define EDP_PSR_AUX_DATA2(dev)			(EDP_PSR_BASE(dev) + 0x18)
 #define   EDP_PSR_DPCD_NORMAL_OPERATION	(1<<24)
-#define EDP_PSR_AUX_DATA3		0x6481c
-#define EDP_PSR_AUX_DATA4		0x64820
-#define EDP_PSR_AUX_DATA5		0x64824
+#define EDP_PSR_AUX_DATA3(dev)			(EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev)			(EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev)			(EDP_PSR_BASE(dev) + 0x24)
 
-#define EDP_PSR_STATUS_CTL			0x64840
+#define EDP_PSR_STATUS_CTL(dev)			(EDP_PSR_BASE(dev) + 0x40)
 #define   EDP_PSR_STATUS_STATE_MASK		(7<<29)
 #define   EDP_PSR_STATUS_STATE_IDLE		(0<<29)
 #define   EDP_PSR_STATUS_STATE_SRDONACK		(1<<29)
@@ -1860,10 +2021,10 @@
 #define   EDP_PSR_STATUS_SENDING_TP1		(1<<4)
 #define   EDP_PSR_STATUS_IDLE_MASK		0xf
 
-#define EDP_PSR_PERF_CNT		0x64844
+#define EDP_PSR_PERF_CNT(dev)		(EDP_PSR_BASE(dev) + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK		0xffffff
 
-#define EDP_PSR_DEBUG_CTL		0x64860
+#define EDP_PSR_DEBUG_CTL(dev)		(EDP_PSR_BASE(dev) + 0x60)
 #define   EDP_PSR_DEBUG_MASK_LPSP	(1<<27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP	(1<<26)
 #define   EDP_PSR_DEBUG_MASK_HPD	(1<<25)
@@ -2006,6 +2167,14 @@
 #define PCH_HDMIC	0xe1150
 #define PCH_HDMID	0xe1160
 
+#define PORT_DFT_I9XX				0x61150
+#define   DC_BALANCE_RESET			(1 << 25)
+#define PORT_DFT2_G4X				0x61154
+#define   DC_BALANCE_RESET_VLV			(1 << 31)
+#define   PIPE_SCRAMBLE_RESET_MASK		(0x3 << 0)
+#define   PIPE_B_SCRAMBLE_RESET			(1 << 1)
+#define   PIPE_A_SCRAMBLE_RESET			(1 << 0)
+
 /* Gen 3 SDVO bits: */
 #define   SDVO_ENABLE				(1 << 31)
 #define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
@@ -2034,6 +2203,7 @@
 
 /* Gen 4 SDVO/HDMI bits: */
 #define   SDVO_COLOR_FORMAT_8bpc		(0 << 26)
+#define   SDVO_COLOR_FORMAT_MASK		(7 << 26)
 #define   SDVO_ENCODING_SDVO			(0 << 10)
 #define   SDVO_ENCODING_HDMI			(2 << 10)
 #define   HDMI_MODE_SELECT_HDMI			(1 << 9) /* HDMI only */
@@ -2238,6 +2408,21 @@
 
 #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+				     _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+				    _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+				     _VLV_BLC_HIST_CTL_B)
+
 /* Backlight control */
 /* Backlight control */
 #define BLC_PWM_CTL2	(dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
 #define BLC_PWM_CTL2	(dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE		(1 << 31)
 #define   BLM_PWM_ENABLE		(1 << 31)
@@ -2986,6 +3171,7 @@
 #define   PIPECONF_DISABLE	0
 #define   PIPECONF_DOUBLE_WIDE	(1<<30)
 #define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPECONF_DSI_PLL_LOCKED	(1<<29) /* vlv & pipe A only */
 #define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
 #define   PIPECONF_SINGLE_WIDE	0
 #define   PIPECONF_PIPE_UNLOCKED 0
@@ -3068,6 +3254,18 @@
 #define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
 #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
+#define _PIPE_MISC_A			0x70030
+#define _PIPE_MISC_B			0x71030
+#define   PIPEMISC_DITHER_BPC_MASK	(7<<5)
+#define   PIPEMISC_DITHER_8_BPC		(0<<5)
+#define   PIPEMISC_DITHER_10_BPC	(1<<5)
+#define   PIPEMISC_DITHER_6_BPC		(2<<5)
+#define   PIPEMISC_DITHER_12_BPC	(3<<5)
+#define   PIPEMISC_DITHER_ENABLE	(1<<4)
+#define   PIPEMISC_DITHER_TYPE_MASK	(3<<2)
+#define   PIPEMISC_DITHER_TYPE_SP	(0<<2)
+#define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
+
 #define VLV_DPFLIPSTAT				(VLV_DISPLAY_BASE + 0x70028)
 #define   PIPEB_LINE_COMPARE_INT_EN		(1<<29)
 #define   PIPEB_HLINE_INT_EN			(1<<28)
@@ -3184,11 +3382,11 @@
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK		0x45100
-#define  WM0_PIPE_PLANE_MASK	(0x7f<<16)
+#define  WM0_PIPE_PLANE_MASK	(0xffff<<16)
 #define  WM0_PIPE_PLANE_SHIFT	16
-#define  WM0_PIPE_SPRITE_MASK	(0x3f<<8)
+#define  WM0_PIPE_SPRITE_MASK	(0xff<<8)
 #define  WM0_PIPE_SPRITE_SHIFT	8
-#define  WM0_PIPE_CURSOR_MASK	(0x1f)
+#define  WM0_PIPE_CURSOR_MASK	(0xff)
 
 #define WM0_PIPEB_ILK		0x45104
 #define WM0_PIPEC_IVB		0x45200
@@ -3198,9 +3396,10 @@
 #define  WM1_LP_LATENCY_MASK	(0x7f<<24)
 #define  WM1_LP_FBC_MASK	(0xf<<20)
 #define  WM1_LP_FBC_SHIFT	20
-#define  WM1_LP_SR_MASK		(0x1ff<<8)
+#define  WM1_LP_FBC_SHIFT_BDW	19
+#define  WM1_LP_SR_MASK		(0x7ff<<8)
 #define  WM1_LP_SR_SHIFT	8
-#define  WM1_LP_CURSOR_MASK	(0x3f)
+#define  WM1_LP_CURSOR_MASK	(0xff)
 #define WM2_LP_ILK		0x4510c
 #define  WM2_LP_EN		(1<<31)
 #define WM3_LP_ILK		0x45110
@@ -3281,17 +3480,17 @@
 *  } while (high1 != high2);
 *  frame = (high1 << 8) | low1;
 */
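/*
 * For reference, a minimal sketch of the split-register read described in
 * the comment above, assuming the PIPE_FRAME_HIGH/LOW masks and shifts
 * defined just below; high_frame/low_frame stand for the frame high/pixel
 * registers (illustration only, not part of this commit):
 *
 *	u32 high1, high2, low, frame;
 *
 *	do {
 *		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 *		low = I915_READ(low_frame);
 *		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 *	} while (high1 != high2);
 *	low = (low & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
 *	frame = (high1 << 8) | low;
 */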
-#define _PIPEAFRAMEHIGH          (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEAFRAMEHIGH          0x70040
 #define   PIPE_FRAME_HIGH_MASK    0x0000ffff
 #define   PIPE_FRAME_HIGH_SHIFT   0
-#define _PIPEAFRAMEPIXEL         (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEAFRAMEPIXEL         0x70044
 #define   PIPE_FRAME_LOW_MASK     0xff000000
 #define   PIPE_FRAME_LOW_SHIFT    24
 #define   PIPE_PIXEL_MASK         0x00ffffff
 #define   PIPE_PIXEL_SHIFT        0
 /* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45	0x70040
-#define _PIPEA_FLIPCOUNT_GM45	0x70044
+#define _PIPEA_FRMCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x70044)
 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
 
 /* Cursor A & B regs */
@@ -3422,10 +3621,10 @@
 #define _PIPEBDSL		(dev_priv->info->display_mmio_offset + 0x71000)
 #define _PIPEBCONF		(dev_priv->info->display_mmio_offset + 0x71008)
 #define _PIPEBSTAT		(dev_priv->info->display_mmio_offset + 0x71024)
-#define _PIPEBFRAMEHIGH		(dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEBFRAMEPIXEL	(dev_priv->info->display_mmio_offset + 0x71044)
-#define _PIPEB_FRMCOUNT_GM45	0x71040
-#define _PIPEB_FLIPCOUNT_GM45	0x71044
+#define _PIPEBFRAMEHIGH		0x71040
+#define _PIPEBFRAMEPIXEL	0x71044
+#define _PIPEB_FRMCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x71044)
 
 /* Display B control */
@@ -3780,6 +3979,7 @@
 #define DE_SPRITEA_FLIP_DONE    (1 << 28)
 #define DE_PLANEB_FLIP_DONE     (1 << 27)
 #define DE_PLANEA_FLIP_DONE     (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
 #define DE_PCU_EVENT            (1 << 25)
 #define DE_GTT_FAULT            (1 << 24)
 #define DE_POISON               (1 << 23)
@@ -3793,13 +3993,18 @@
 #define DE_PIPEB_ODD_FIELD      (1 << 13)
 #define DE_PIPEB_LINE_COMPARE   (1 << 12)
 #define DE_PIPEB_VSYNC          (1 << 11)
+#define DE_PIPEB_CRC_DONE	(1 << 10)
 #define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
 #define DE_PIPEA_VBLANK         (1 << 7)
+#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8*(pipe)))
 #define DE_PIPEA_EVEN_FIELD     (1 << 6)
 #define DE_PIPEA_ODD_FIELD      (1 << 5)
 #define DE_PIPEA_LINE_COMPARE   (1 << 4)
 #define DE_PIPEA_VSYNC          (1 << 3)
+#define DE_PIPEA_CRC_DONE	(1 << 2)
+#define DE_PIPE_CRC_DONE(pipe)	(1 << (2 + 8*(pipe)))
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8*(pipe)))
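/*
 * For reference (not part of this commit): the per-pipe helpers above just
 * recompute the fixed per-pipe bits, e.g. with PIPE_B == 1,
 * DE_PIPE_CRC_DONE(PIPE_B) == (1 << 10) == DE_PIPEB_CRC_DONE and
 * DE_PIPE_FIFO_UNDERRUN(PIPE_B) == (1 << 8) == DE_PIPEB_FIFO_UNDERRUN.
 */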
 
 /* More Ivybridge lolz */
 #define DE_ERR_INT_IVB			(1<<30)
@@ -3815,9 +4020,8 @@
 #define DE_PIPEB_VBLANK_IVB		(1<<5)
 #define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
 #define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane)	(1<< (3 + 5*(plane)))
 #define DE_PIPEA_VBLANK_IVB		(1<<0)
-
-#define DE_PIPE_VBLANK_ILK(pipe)	(1 << ((pipe * 8) + 7))
 #define DE_PIPE_VBLANK_IVB(pipe)	(1 << (pipe * 5))
 
 #define VLV_MASTER_IER			0x4400c /* Gunit master IER */
@@ -3833,6 +4037,71 @@
 #define GTIIR   0x44018
 #define GTIER   0x4401c
 
+#define GEN8_MASTER_IRQ			0x44200
+#define  GEN8_MASTER_IRQ_CONTROL	(1<<31)
+#define  GEN8_PCU_IRQ			(1<<30)
+#define  GEN8_DE_PCH_IRQ		(1<<23)
+#define  GEN8_DE_MISC_IRQ		(1<<22)
+#define  GEN8_DE_PORT_IRQ		(1<<20)
+#define  GEN8_DE_PIPE_C_IRQ		(1<<18)
+#define  GEN8_DE_PIPE_B_IRQ		(1<<17)
+#define  GEN8_DE_PIPE_A_IRQ		(1<<16)
+#define  GEN8_DE_PIPE_IRQ(pipe)		(1<<(16+pipe))
+#define  GEN8_GT_VECS_IRQ		(1<<6)
+#define  GEN8_GT_VCS2_IRQ		(1<<3)
+#define  GEN8_GT_VCS1_IRQ		(1<<2)
+#define  GEN8_GT_BCS_IRQ		(1<<1)
+#define  GEN8_GT_RCS_IRQ		(1<<0)
+
+#define GEN8_GT_ISR(which) (0x44300 + (0x10 * (which)))
+#define GEN8_GT_IMR(which) (0x44304 + (0x10 * (which)))
+#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
+#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
+
+#define GEN8_BCS_IRQ_SHIFT 16
+#define GEN8_RCS_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VECS_IRQ_SHIFT 0
+
+#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IIR(pipe) (0x44408 + (0x10 * (pipe)))
+#define GEN8_DE_PIPE_IER(pipe) (0x4440c + (0x10 * (pipe)))
+#define  GEN8_PIPE_FIFO_UNDERRUN	(1 << 31)
+#define  GEN8_PIPE_CDCLK_CRC_ERROR	(1 << 29)
+#define  GEN8_PIPE_CDCLK_CRC_DONE	(1 << 28)
+#define  GEN8_PIPE_CURSOR_FAULT		(1 << 10)
+#define  GEN8_PIPE_SPRITE_FAULT		(1 << 9)
+#define  GEN8_PIPE_PRIMARY_FAULT	(1 << 8)
+#define  GEN8_PIPE_SPRITE_FLIP_DONE	(1 << 5)
+#define  GEN8_PIPE_FLIP_DONE		(1 << 4)
+#define  GEN8_PIPE_SCAN_LINE_EVENT	(1 << 2)
+#define  GEN8_PIPE_VSYNC		(1 << 1)
+#define  GEN8_PIPE_VBLANK		(1 << 0)
+#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
+	(GEN8_PIPE_CURSOR_FAULT | \
+	 GEN8_PIPE_SPRITE_FAULT | \
+	 GEN8_PIPE_PRIMARY_FAULT)
+
+#define GEN8_DE_PORT_ISR 0x44440
+#define GEN8_DE_PORT_IMR 0x44444
+#define GEN8_DE_PORT_IIR 0x44448
+#define GEN8_DE_PORT_IER 0x4444c
+#define  GEN8_PORT_DP_A_HOTPLUG		(1 << 3)
+#define  GEN8_AUX_CHANNEL_A		(1 << 0)
+
+#define GEN8_DE_MISC_ISR 0x44460
+#define GEN8_DE_MISC_IMR 0x44464
+#define GEN8_DE_MISC_IIR 0x44468
+#define GEN8_DE_MISC_IER 0x4446c
+#define  GEN8_DE_MISC_GSE		(1 << 27)
+
+#define GEN8_PCU_ISR 0x444e0
+#define GEN8_PCU_IMR 0x444e4
+#define GEN8_PCU_IIR 0x444e8
+#define GEN8_PCU_IER 0x444ec
+
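/*
 * For reference (not part of this commit): each GEN8 interrupt bank above is
 * a 16-byte ISR/IMR/IIR/IER group, so for example GEN8_GT_IIR(2) == 0x44328
 * and, with PIPE_C == 2, GEN8_DE_PIPE_IIR(PIPE_C) == 0x44428.
 */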
 #define ILK_DISPLAY_CHICKEN2	0x42004
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define  ILK_ELPIN_409_SELECT	(1 << 25)
@@ -3858,8 +4127,14 @@
 # define CHICKEN3_DGMG_DONE_FIX_DISABLE		(1 << 2)
 
 #define CHICKEN_PAR1_1		0x42080
+#define  DPA_MASK_VBLANK_SRD	(1 << 15)
 #define  FORCE_ARB_IDLE_PLANES	(1 << 14)
 
+#define _CHICKEN_PIPESL_1_A	0x420b0
+#define _CHICKEN_PIPESL_1_B	0x420b4
+#define  DPRS_MASK_VBLANK_SRD	(1 << 0)
+#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
 #define  DISP_FBC_WM_DIS		(1<<15)
@@ -3870,6 +4145,8 @@
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1		0x7010
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
+#define COMMON_SLICE_CHICKEN2			0x7014
+# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
 
 #define GEN7_L3CNTLREG1				0xB01C
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C4FFF8C
@@ -4416,6 +4693,8 @@
 #define PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
 #define PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
 #define PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
+#define  PANEL_PORT_SELECT_DPB_VLV	(1 << 30)
+#define  PANEL_PORT_SELECT_DPC_VLV	(2 << 30)
 #define PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
 #define PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
 
@@ -4447,7 +4726,6 @@
 #define  PANEL_PORT_SELECT_MASK	(3 << 30)
 #define  PANEL_PORT_SELECT_LVDS	(0 << 30)
 #define  PANEL_PORT_SELECT_DPA	(1 << 30)
-#define  EDP_PANEL		(1 << 30)
 #define  PANEL_PORT_SELECT_DPC	(2 << 30)
 #define  PANEL_PORT_SELECT_DPD	(3 << 30)
 #define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
@@ -4456,11 +4734,6 @@
 #define  PANEL_LIGHT_ON_DELAY_SHIFT	0
 
 #define PCH_PP_OFF_DELAYS	0xc720c
-#define  PANEL_POWER_PORT_SELECT_MASK	(0x3 << 30)
-#define  PANEL_POWER_PORT_LVDS		(0 << 30)
-#define  PANEL_POWER_PORT_DP_A		(1 << 30)
-#define  PANEL_POWER_PORT_DP_C		(2 << 30)
-#define  PANEL_POWER_PORT_DP_D		(3 << 30)
 #define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT	16
 #define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
@@ -4638,7 +4911,7 @@
 #define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
 #define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
 #define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
-#define   GEN7_RP_DOWN_IDLE_AVG			(0x2<<0)
+#define   GEN6_RP_DOWN_IDLE_AVG			(0x2<<0)
 #define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
 #define GEN6_RP_UP_THRESHOLD			0xA02C
 #define GEN6_RP_DOWN_THRESHOLD			0xA030
@@ -4683,6 +4956,10 @@
 						 GEN6_PM_RP_DOWN_TIMEOUT)
 
 #define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define VLV_COUNTER_CONTROL			0x138104
+#define   VLV_COUNT_RANGE_HIGH			(1<<15)
+#define   VLV_MEDIA_RC6_COUNT_EN		(1<<1)
+#define   VLV_RENDER_RC6_COUNT_EN		(1<<0)
 #define GEN6_GT_GFX_RC6				0x138108
 #define GEN6_GT_GFX_RC6p			0x13810C
 #define GEN6_GT_GFX_RC6pp			0x138110
@@ -4694,8 +4971,11 @@
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define	  GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
+#define   GEN6_PCODE_READ_D_COMP		0x10
+#define   GEN6_PCODE_WRITE_D_COMP		0x11
 #define   GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
 #define   GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
+#define   DISPLAY_IPS_CONTROL			0x19
 #define GEN6_PCODE_DATA				0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
@@ -4713,6 +4993,7 @@
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
 #define   GEN7_PARITY_ERROR_VALID	(1<<13)
 #define   GEN7_L3CDERRST1_BANK_MASK	(3<<11)
@@ -4726,11 +5007,13 @@
 #define   GEN7_L3CDERRST1_ENABLE	(1<<7)
 
 #define GEN7_L3LOG_BASE			0xB070
+#define HSW_L3LOG_BASE_SLICE1		0xB270
 #define GEN7_L3LOG_SIZE			0x80
 
 #define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
 #define GEN7_HALF_SLICE_CHICKEN1_GT2	0xf100
 #define   GEN7_MAX_PS_THREAD_DEP		(8<<12)
+#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE	(1<<10)
 #define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE	(1<<3)
 
 #define GEN7_ROW_CHICKEN2		0xe4f4
@@ -4740,6 +5023,10 @@
 #define HSW_ROW_CHICKEN3		0xe49c
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
 
+#define HALF_SLICE_CHICKEN3		0xe184
+#define   GEN8_CENTROID_PIXEL_OPT_DIS	(1<<8)
+#define   GEN8_SAMPLER_POWER_BYPASS_DIS	(1<<1)
+
 #define G4X_AUD_VID_DID			(dev_priv->info->display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL		0x808629FB
 #define INTEL_AUDIO_DEVBLC		0x80862801
@@ -4781,6 +5068,18 @@
 					CPT_AUD_CNTL_ST_B)
 #define CPT_AUD_CNTRL_ST2		0xE50C0
 
+#define VLV_HDMIW_HDMIEDID_A		(VLV_DISPLAY_BASE + 0x62050)
+#define VLV_HDMIW_HDMIEDID_B		(VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					VLV_HDMIW_HDMIEDID_A, \
+					VLV_HDMIW_HDMIEDID_B)
+#define VLV_AUD_CNTL_ST_A		(VLV_DISPLAY_BASE + 0x620B4)
+#define VLV_AUD_CNTL_ST_B		(VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					VLV_AUD_CNTL_ST_A, \
+					VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2		(VLV_DISPLAY_BASE + 0x620C0)
+
 /* These are the 4 32-bit write offset registers for each stream
  * output buffer.  It determines the offset from the
  * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4797,6 +5096,12 @@
 #define CPT_AUD_CFG(pipe) _PIPE(pipe, \
 					CPT_AUD_CONFIG_A, \
 					CPT_AUD_CONFIG_B)
+#define VLV_AUD_CONFIG_A		(VLV_DISPLAY_BASE + 0x62000)
+#define VLV_AUD_CONFIG_B		(VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
+					VLV_AUD_CONFIG_A, \
+					VLV_AUD_CONFIG_B)
+
 #define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
 #define   AUD_CONFIG_UPPER_N_SHIFT		20
@@ -4804,7 +5109,17 @@
 #define   AUD_CONFIG_LOWER_N_SHIFT		4
 #define   AUD_CONFIG_LOWER_N_VALUE		(0xfff << 4)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
-#define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK	(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175	(0 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25200	(1 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27000	(2 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27027	(3 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54000	(4 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54054	(5 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74176	(6 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250	(7 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352	(8 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500	(9 << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
 /* HSW Audio */
@@ -4929,6 +5244,7 @@
 #define DDI_BUF_CTL_B				0x64100
 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
 #define  DDI_BUF_CTL_ENABLE			(1<<31)
+/* Haswell */
 #define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
 #define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
 #define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
@@ -4938,6 +5254,16 @@
 #define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
 #define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
 #define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
+/* Broadwell */
+#define  DDI_BUF_EMP_400MV_0DB_BDW		(0<<24)   /* Sel0 */
+#define  DDI_BUF_EMP_400MV_3_5DB_BDW		(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_6DB_BDW		(2<<24)   /* Sel2 */
+#define  DDI_BUF_EMP_600MV_0DB_BDW		(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_600MV_3_5DB_BDW		(4<<24)   /* Sel4 */
+#define  DDI_BUF_EMP_600MV_6DB_BDW		(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_800MV_0DB_BDW		(6<<24)   /* Sel6 */
+#define  DDI_BUF_EMP_800MV_3_5DB_BDW		(7<<24)   /* Sel7 */
+#define  DDI_BUF_EMP_1200MV_0DB_BDW		(8<<24)   /* Sel8 */
 #define  DDI_BUF_EMP_MASK			(0xf<<24)
 #define  DDI_BUF_PORT_REVERSAL			(1<<16)
 #define  DDI_BUF_IS_IDLE			(1<<7)
@@ -5047,6 +5373,9 @@
 #define  LCPLL_PLL_LOCK			(1<<30)
 #define  LCPLL_CLK_FREQ_MASK		(3<<26)
 #define  LCPLL_CLK_FREQ_450		(0<<26)
+#define  LCPLL_CLK_FREQ_54O_BDW		(1<<26)
+#define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
+#define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
 #define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 #define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
@@ -5128,4 +5457,414 @@
 #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
 #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe)		_PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define  DPI_ENABLE					(1 << 31) /* A + B */
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT		27
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 27)
+#define  DUAL_LINK_MODE_MASK				(1 << 26)
+#define  DUAL_LINK_MODE_FRONT_BACK			(0 << 26)
+#define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE		(1 << 26)
+#define  DITHERING_ENABLE				(1 << 25) /* A + B */
+#define  FLOPPED_HSTX					(1 << 23)
+#define  DE_INVERT					(1 << 19) /* XXX */
+#define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT		18
+#define  MIPIA_FLISDSI_DELAY_COUNT_MASK			(0xf << 18)
+#define  AFE_LATCHOUT					(1 << 17)
+#define  LP_OUTPUT_HOLD					(1 << 16)
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT		15
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK		(1 << 15)
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT		11
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 11)
+#define  CSB_SHIFT					9
+#define  CSB_MASK					(3 << 9)
+#define  CSB_20MHZ					(0 << 9)
+#define  CSB_10MHZ					(1 << 9)
+#define  CSB_40MHZ					(2 << 9)
+#define  BANDGAP_MASK					(1 << 8)
+#define  BANDGAP_PNW_CIRCUIT				(0 << 8)
+#define  BANDGAP_LNC_CIRCUIT				(1 << 8)
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT		5
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK		(7 << 5)
+#define  TEARING_EFFECT_DELAY				(1 << 4) /* A + B */
+#define  TEARING_EFFECT_SHIFT				2 /* A + B */
+#define  TEARING_EFFECT_MASK				(3 << 2)
+#define  TEARING_EFFECT_OFF				(0 << 2)
+#define  TEARING_EFFECT_DSI				(1 << 2)
+#define  TEARING_EFFECT_GPIO				(2 << 2)
+#define  LANE_CONFIGURATION_SHIFT			0
+#define  LANE_CONFIGURATION_MASK			(3 << 0)
+#define  LANE_CONFIGURATION_4LANE			(0 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_A			(1 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_B			(2 << 0)
+
+#define _MIPIA_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe)		_PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define  TEARING_EFFECT_DELAY_SHIFT			0
+#define  TEARING_EFFECT_DELAY_MASK			(0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG				(VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe)		_PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
+#define  ULPS_STATE_MASK				(3 << 1)
+#define  ULPS_STATE_ENTER				(2 << 1)
+#define  ULPS_STATE_EXIT				(1 << 1)
+#define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
+#define  DEVICE_READY					(1 << 0)
+
+#define _MIPIA_INTR_STAT			(VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT			(VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe)		_PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN				(VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN				(VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe)		_PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define  TEARING_EFFECT					(1 << 31)
+#define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
+#define  GEN_READ_DATA_AVAIL				(1 << 29)
+#define  LP_GENERIC_WR_FIFO_FULL			(1 << 28)
+#define  HS_GENERIC_WR_FIFO_FULL			(1 << 27)
+#define  RX_PROT_VIOLATION				(1 << 26)
+#define  RX_INVALID_TX_LENGTH				(1 << 25)
+#define  ACK_WITH_NO_ERROR				(1 << 24)
+#define  TURN_AROUND_ACK_TIMEOUT			(1 << 23)
+#define  LP_RX_TIMEOUT					(1 << 22)
+#define  HS_TX_TIMEOUT					(1 << 21)
+#define  DPI_FIFO_UNDERRUN				(1 << 20)
+#define  LOW_CONTENTION					(1 << 19)
+#define  HIGH_CONTENTION				(1 << 18)
+#define  TXDSI_VC_ID_INVALID				(1 << 17)
+#define  TXDSI_DATA_TYPE_NOT_RECOGNISED			(1 << 16)
+#define  TXCHECKSUM_ERROR				(1 << 15)
+#define  TXECC_MULTIBIT_ERROR				(1 << 14)
+#define  TXECC_SINGLE_BIT_ERROR				(1 << 13)
+#define  TXFALSE_CONTROL_ERROR				(1 << 12)
+#define  RXDSI_VC_ID_INVALID				(1 << 11)
+#define  RXDSI_DATA_TYPE_NOT_REGOGNISED			(1 << 10)
+#define  RXCHECKSUM_ERROR				(1 << 9)
+#define  RXECC_MULTIBIT_ERROR				(1 << 8)
+#define  RXECC_SINGLE_BIT_ERROR				(1 << 7)
+#define  RXFALSE_CONTROL_ERROR				(1 << 6)
+#define  RXHS_RECEIVE_TIMEOUT_ERROR			(1 << 5)
+#define  RX_LP_TX_SYNC_ERROR				(1 << 4)
+#define  RXEXCAPE_MODE_ENTRY_ERROR			(1 << 3)
+#define  RXEOT_SYNC_ERROR				(1 << 2)
+#define  RXSOT_SYNC_ERROR				(1 << 1)
+#define  RXSOT_ERROR					(1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe)		_PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
+#define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
+#define  CMD_MODE_DATA_WIDTH_16_BIT			(1 << 13)
+#define  CMD_MODE_DATA_WIDTH_9_BIT			(2 << 13)
+#define  CMD_MODE_DATA_WIDTH_8_BIT			(3 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION1			(4 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION2			(5 << 13)
+#define  VID_MODE_FORMAT_MASK				(0xf << 7)
+#define  VID_MODE_NOT_SUPPORTED				(0 << 7)
+#define  VID_MODE_FORMAT_RGB565				(1 << 7)
+#define  VID_MODE_FORMAT_RGB666				(2 << 7)
+#define  VID_MODE_FORMAT_RGB666_LOOSE			(3 << 7)
+#define  VID_MODE_FORMAT_RGB888				(4 << 7)
+#define  CMD_MODE_CHANNEL_NUMBER_SHIFT			5
+#define  CMD_MODE_CHANNEL_NUMBER_MASK			(3 << 5)
+#define  VID_MODE_CHANNEL_NUMBER_SHIFT			3
+#define  VID_MODE_CHANNEL_NUMBER_MASK			(3 << 3)
+#define  DATA_LANES_PRG_REG_SHIFT			0
+#define  DATA_LANES_PRG_REG_MASK			(7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define  TURN_AROUND_TIMEOUT_MASK			0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe)	_PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define  DEVICE_RESET_TIMER_MASK			0xffff
+
+#define _MIPIA_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe)	_PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define  VERTICAL_ADDRESS_SHIFT				16
+#define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
+#define  HORIZONTAL_ADDRESS_SHIFT			0
+#define  HORIZONTAL_ADDRESS_MASK			0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe)	_PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define  DBI_FIFO_EMPTY_HALF				(0 << 0)
+#define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
+#define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe)	_PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)	_PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe)		_PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define  DPI_LP_MODE					(1 << 6)
+#define  BACKLIGHT_OFF					(1 << 5)
+#define  BACKLIGHT_ON					(1 << 4)
+#define  COLOR_MODE_OFF					(1 << 3)
+#define  COLOR_MODE_ON					(1 << 2)
+#define  TURN_ON					(1 << 1)
+#define  SHUTDOWN					(1 << 0)
+
+#define _MIPIA_DPI_DATA				(VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA				(VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe)		_PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define  COMMAND_BYTE_SHIFT				0
+#define  COMMAND_BYTE_MASK				(0x3f << 0)
+
+#define _MIPIA_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe)		_PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define  MASTER_INIT_TIMER_SHIFT			0
+#define  MASTER_INIT_TIMER_MASK				(0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe)	_PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define  MAX_RETURN_PKT_SIZE_SHIFT			0
+#define  MAX_RETURN_PKT_SIZE_MASK			(0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe)	_PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define  RANDOM_DPI_DISPLAY_RESOLUTION			(1 << 4)
+#define  DISABLE_VIDEO_BTA				(1 << 3)
+#define  IP_TG_CONFIG					(1 << 2)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE		(1 << 0)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS		(2 << 0)
+#define  VIDEO_MODE_BURST				(3 << 0)
+
+#define _MIPIA_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe)		_PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 7)
+#define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 6)
+#define  LOW_CONTENTION_RECOVERY_DISABLE		(1 << 5)
+#define  HIGH_CONTENTION_RECOVERY_DISABLE		(1 << 4)
+#define  TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define  TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE		(1 << 2)
+#define  CLOCKSTOP					(1 << 1)
+#define  EOT_DISABLE					(1 << 0)
+
+#define _MIPIA_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe)		_PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define  LP_BYTECLK_SHIFT				0
+#define  LP_BYTECLK_MASK				(0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define  LONG_PACKET_WORD_COUNT_SHIFT			8
+#define  LONG_PACKET_WORD_COUNT_MASK			(0xffff << 8)
+#define  SHORT_PACKET_PARAM_SHIFT			8
+#define  SHORT_PACKET_PARAM_MASK			(0xffff << 8)
+#define  VIRTUAL_CHANNEL_SHIFT				6
+#define  VIRTUAL_CHANNEL_MASK				(3 << 6)
+#define  DATA_TYPE_SHIFT				0
+#define  DATA_TYPE_MASK					(0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe)	_PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define  DPI_FIFO_EMPTY					(1 << 28)
+#define  DBI_FIFO_EMPTY					(1 << 27)
+#define  LP_CTRL_FIFO_EMPTY				(1 << 26)
+#define  LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
+#define  LP_CTRL_FIFO_FULL				(1 << 24)
+#define  HS_CTRL_FIFO_EMPTY				(1 << 18)
+#define  HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
+#define  HS_CTRL_FIFO_FULL				(1 << 16)
+#define  LP_DATA_FIFO_EMPTY				(1 << 10)
+#define  LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
+#define  LP_DATA_FIFO_FULL				(1 << 8)
+#define  HS_DATA_FIFO_EMPTY				(1 << 2)
+#define  HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
+#define  HS_DATA_FIFO_FULL				(1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe)	_PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define  DBI_HS_LP_MODE_MASK				(1 << 0)
+#define  DBI_LP_MODE					(1 << 0)
+#define  DBI_HS_MODE					(0 << 0)
+
+#define _MIPIA_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe)		_PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define  EXIT_ZERO_COUNT_SHIFT				24
+#define  EXIT_ZERO_COUNT_MASK				(0x3f << 24)
+#define  TRAIL_COUNT_SHIFT				16
+#define  TRAIL_COUNT_MASK				(0x1f << 16)
+#define  CLK_ZERO_COUNT_SHIFT				8
+#define  CLK_ZERO_COUNT_MASK				(0xff << 8)
+#define  PREPARE_COUNT_SHIFT				0
+#define  PREPARE_COUNT_MASK				(0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe)		_PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe)	_PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define  LP_HS_SSW_CNT_SHIFT				16
+#define  LP_HS_SSW_CNT_MASK				(0xffff << 16)
+#define  HS_LP_PWR_SW_CNT_SHIFT				0
+#define  HS_LP_PWR_SW_CNT_MASK				(0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe)	_PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define  STOP_STATE_STALL_COUNTER_SHIFT			0
+#define  STOP_STATE_STALL_COUNTER_MASK			(0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define  RX_CONTENTION_DETECTED				(1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL			(VLV_DISPLAY_BASE + 0xb100)
+#define  DBI_TYPEC_ENABLE				(1 << 31)
+#define  DBI_TYPEC_WIP					(1 << 30)
+#define  DBI_TYPEC_OPTION_SHIFT				28
+#define  DBI_TYPEC_OPTION_MASK				(3 << 28)
+#define  DBI_TYPEC_FREQ_SHIFT				24
+#define  DBI_TYPEC_FREQ_MASK				(0xf << 24)
+#define  DBI_TYPEC_OVERRIDE				(1 << 8)
+#define  DBI_TYPEC_OVERRIDE_COUNTER_SHIFT		0
+#define  DBI_TYPEC_OVERRIDE_COUNTER_MASK		(0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL				(VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL				(VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe)			_PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define  ESCAPE_CLOCK_DIVIDER_SHIFT			5 /* A only */
+#define  ESCAPE_CLOCK_DIVIDER_MASK			(3 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_1				(0 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_2				(1 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_4				(2 << 5)
+#define  READ_REQUEST_PRIORITY_SHIFT			3
+#define  READ_REQUEST_PRIORITY_MASK			(3 << 3)
+#define  READ_REQUEST_PRIORITY_LOW			(0 << 3)
+#define  READ_REQUEST_PRIORITY_HIGH			(3 << 3)
+#define  RGB_FLIP_TO_BGR				(1 << 2)
+
+#define _MIPIA_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe)		_PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define  DATA_MEM_ADDRESS_SHIFT				5
+#define  DATA_MEM_ADDRESS_MASK				(0x7ffffff << 5)
+#define  DATA_VALID					(1 << 0)
+
+#define _MIPIA_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe)		_PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define  DATA_LENGTH_SHIFT				0
+#define  DATA_LENGTH_MASK				(0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe)	_PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define  COMMAND_MEM_ADDRESS_SHIFT			5
+#define  COMMAND_MEM_ADDRESS_MASK			(0x7ffffff << 5)
+#define  AUTO_PWG_ENABLE				(1 << 2)
+#define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING		(1 << 1)
+#define  COMMAND_VALID					(1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe)	_PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define  COMMAND_LENGTH_SHIFT(n)			(8 * (n)) /* n: 0...3 */
+#define  COMMAND_LENGTH_MASK(n)				(0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+	(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe)	_PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define  READ_DATA_VALID(n)				(1 << (n))
+
 #endif /* _I915_REG_H_ */

+ 40 - 4
drivers/gpu/drm/i915/i915_suspend.c

@@ -214,6 +214,22 @@ static void i915_save_display(struct drm_device *dev)
 		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+
+		dev_priv->regfile.saveBLC_PWM_CTL =
+			I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+		dev_priv->regfile.saveBLC_HIST_CTL =
+			I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
+		dev_priv->regfile.saveBLC_PWM_CTL2 =
+			I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+		dev_priv->regfile.saveBLC_PWM_CTL_B =
+			I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
+		dev_priv->regfile.saveBLC_HIST_CTL_B =
+			I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
+		dev_priv->regfile.saveBLC_PWM_CTL2_B =
+			I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
 	} else {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -302,6 +318,19 @@ static void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 		I915_WRITE(RSTDBYCTL,
 			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+	} else if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
+			   dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
+			   dev_priv->regfile.saveBLC_HIST_CTL);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
+			   dev_priv->regfile.saveBLC_PWM_CTL2);
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_HIST_CTL);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL2);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
 		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
@@ -340,7 +369,9 @@ int i915_save_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_read_config_byte(dev->pdev, LBB,
+				     &dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -367,7 +398,8 @@ int i915_save_state(struct drm_device *dev)
 	intel_disable_gt_powersave(dev);
 
 	/* Cache mode state */
-	dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
 	/* Memory Arbitration state */
 	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +422,9 @@ int i915_restore_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_write_config_byte(dev->pdev, LBB,
+				      dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -414,7 +448,9 @@ int i915_restore_state(struct drm_device *dev)
 	}
 
 	/* Cache mode state */
-	I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+	if (INTEL_INFO(dev)->gen < 7)
+		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+			   0xffff0000);
 
 	/* Memory arbitration state */
 	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);

+ 106 - 46
drivers/gpu/drm/i915/i915_sysfs.c

@@ -32,30 +32,50 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
 #ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev, const u32 reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 raw_time; /* 32b value may overflow during fixed point math */
+	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
 
 	if (!intel_enable_rc6(dev))
 		return 0;
 
-	raw_time = I915_READ(reg) * 128ULL;
-	return DIV_ROUND_UP_ULL(raw_time, 100000);
+	/* On VLV, residency time is in CZ units rather than 1.28us */
+	if (IS_VALLEYVIEW(dev)) {
+		u32 clkctl2;
+
+		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+			CLK_CTL2_CZCOUNT_30NS_SHIFT;
+		if (!clkctl2) {
+			WARN(!clkctl2, "bogus CZ count value");
+			return 0;
+		}
+		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+			units <<= 8;
+
+		div = 1000000ULL * bias;
+	}
+
+	raw_time = I915_READ(reg) * units;
+	return DIV_ROUND_UP_ULL(raw_time, div);
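/*
 * For reference (not part of this commit): with the defaults above a counter
 * that ticks every 1.28us comes out in ms directly, since count * 128 /
 * 100000 == count * 1.28us / 1000.  On VLV the CZ count field gives CZ
 * clocks per 30ns, so e.g. a field value of 3 yields units =
 * DIV_ROUND_UP(30 * 100, 3) == 1000, i.e. a 10ns tick scaled by the bias of
 * 100; div = 1000000 * 100 folds the ns->ms conversion together with that
 * same bias, so the bias cancels in the final result.
 */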
 }
 
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_get_drvdata(kdev);
 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
@@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	if (IS_VALLEYVIEW(dminor->dev))
+		rc6p_residency = 0;
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	if (IS_VALLEYVIEW(dminor->dev))
+		rc6pp_residency = 0;
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
@@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	     loff_t offset, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
-	uint32_t misccpctl;
-	int i, ret;
+	int slice = (int)(uintptr_t)attr->private;
+	int ret;
+
+	count = round_down(count, 4);
 
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
+	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
-	for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
-		*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
-
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	if (dev_priv->l3_parity.remap_info[slice])
+		memcpy(buf,
+		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       count);
+	else
+		memset(buf, 0, count);
 
 	mutex_unlock(&drm_dev->struct_mutex);
 
-	return i - offset;
+	return count;
 }
 
 static ssize_t
@@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	      loff_t offset, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	struct i915_hw_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
+	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
+	if (dev_priv->hw_contexts_disabled)
+		return -ENXIO;
+
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
 
-	if (!dev_priv->l3_parity.remap_info) {
+	if (!dev_priv->l3_parity.remap_info[slice]) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
 			mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	 * at this point it is left as a TODO.
 	*/
 	if (temp)
-		dev_priv->l3_parity.remap_info = temp;
+		dev_priv->l3_parity.remap_info[slice] = temp;
 
-	memcpy(dev_priv->l3_parity.remap_info + (offset/4),
-	       buf + (offset/4),
-	       count);
+	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
 
-	i915_gem_l3_remap(drm_dev);
+	/* NB: We defer the remapping until we switch to the context */
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		ctx->remap_slice |= (1<<slice);
 
 	mutex_unlock(&drm_dev->struct_mutex);
 
@@ -200,17 +232,29 @@ static struct bin_attribute dpf_attrs = {
 	.size = GEN7_L3LOG_SIZE,
 	.read = i915_l3_read,
 	.write = i915_l3_write,
-	.mmap = NULL
+	.mmap = NULL,
+	.private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+	.size = GEN7_L3LOG_SIZE,
+	.read = i915_l3_read,
+	.write = i915_l3_write,
+	.mmap = NULL,
+	.private = (void *)1
 };
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -227,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -238,11 +282,13 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap;
@@ -436,7 +488,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
 	struct device *kdev = container_of(kobj, struct device, kobj);
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct i915_error_state_file_priv error_priv;
 	struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
 				 loff_t off, size_t count)
 {
 	struct device *kdev = container_of(kobj, struct device, kobj);
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	int ret;
 
@@ -501,27 +553,34 @@ void i915_setup_sysfs(struct drm_device *dev)
 
 #ifdef CONFIG_PM
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
 #endif
-	if (HAS_L3_GPU_CACHE(dev)) {
-		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+	if (HAS_L3_DPF(dev)) {
+		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
+
+		if (NUM_L3_SLICES(dev) > 1) {
+			ret = device_create_bin_file(dev->primary->kdev,
+						     &dpf_attrs_1);
+			if (ret)
+				DRM_ERROR("l3 parity slice 1 setup failed\n");
+		}
 	}
 
 	ret = 0;
 	if (IS_VALLEYVIEW(dev))
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
 	else if (INTEL_INFO(dev)->gen >= 6)
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
 	if (ret)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
-	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
 				    &error_state_attr);
 	if (ret)
 		DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@ void i915_setup_sysfs(struct drm_device *dev)
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
-	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
 	if (IS_VALLEYVIEW(dev))
-		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
 	else
-		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
-	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
+	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
 #endif
 }

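Note on the sysfs hunks above: i915_l3_read() now returns the driver's cached per-slice remap table (l3_parity.remap_info[slice]) instead of reading the GEN7_L3LOG registers directly, and the slice is selected via the bin_attribute's .private field (dpf_attrs for slice 0, dpf_attrs_1 exposing "l3_parity_slice_1"). Below is a minimal, hypothetical userspace sketch of reading one of these files; it is not part of this pull. The /sys/class/drm/card0 path and the 0x80-byte size (mirroring GEN7_L3LOG_SIZE) are assumptions taken from the hunks; adjust for your card index.

/*
 * Hypothetical sketch, not from this pull: dump the L3 parity remap
 * table exposed by the new per-slice bin_attributes. Path and size
 * are assumptions based on the diff above.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define L3LOG_SIZE 0x80	/* assumed to mirror GEN7_L3LOG_SIZE */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1]
				    : "/sys/class/drm/card0/l3_parity";
	uint32_t buf[L3LOG_SIZE / 4];
	ssize_t n, i;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* i915_l3_read() copies remap_info[slice], or zeroes if unset */
	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (i = 0; i < n / 4; i++)
		if (buf[i])
			printf("row %zd: 0x%08x\n", i, (unsigned)buf[i]);
	close(fd);
	return 0;
}

Reading all zeroes simply means no remap has been written for that slice yet, matching the memset() fallback in the new i915_l3_read().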
+ 59 - 3
drivers/gpu/drm/i915/i915_trace.h

@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
 	    TP_printk("dev=%d", __entry->dev)
 	    TP_printk("dev=%d", __entry->dev)
 );
 );
 
 
+TRACE_EVENT(i915_gem_evict_vm,
+	    TP_PROTO(struct i915_address_space *vm),
+	    TP_ARGS(vm),
+
+	    TP_STRUCT__entry(
+			     __field(struct i915_address_space *, vm)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->vm = vm;
+			  ),
+
+	    TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+	    TP_PROTO(struct intel_ring_buffer *from,
+		     struct intel_ring_buffer *to,
+		     u32 seqno),
+	    TP_ARGS(from, to, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, sync_from)
+			     __field(u32, sync_to)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = from->dev->primary->index;
+			   __entry->sync_from = from->id;
+			   __entry->sync_to = to->id;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+		      __entry->dev,
+		      __entry->sync_from, __entry->sync_to,
+		      __entry->seqno)
+);
+
 TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
 	    TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
 	    TP_ARGS(ring, seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-	    TP_ARGS(ring, seqno)
+TRACE_EVENT(i915_gem_request_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = ring->get_seqno(ring, false);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,

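Note on the trace hunks: i915_gem_request_complete loses its seqno argument and becomes a standalone TRACE_EVENT that samples the seqno itself via ring->get_seqno() at trace time, and two new events (i915_gem_evict_vm, i915_gem_ring_sync_to) are added. A minimal, hypothetical way to watch one of these events from userspace through the standard ftrace interface is sketched below; it assumes debugfs is mounted at /sys/kernel/debug and root privileges, and is not part of this pull.

/*
 * Hypothetical sketch, not from this pull: enable the reworked
 * i915_gem_request_complete tracepoint and stream trace_pipe.
 * The tracefs mount point is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, s, strlen(s)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char line[512];
	FILE *pipe;

	if (write_str(TRACEFS "/events/i915/i915_gem_request_complete/enable",
		      "1")) {
		perror("enable tracepoint");
		return 1;
	}
	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	/* Each line carries the dev/ring/seqno fields from TP_printk() */
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}

Because the event now reads the seqno itself, callers no longer need to thread a seqno through to the trace call, which is why the DEFINE_EVENT form was replaced.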
Some files were not shown because too many files changed in this diff