Merge tag 'drm-intel-next-2016-09-19' of git://anongit.freedesktop.org/drm-intel into drm-next

- refactor the sseu code (Imre)
- refine guc dmesg output (Dave Gordon)
- more vgpu work
- more skl wm fixes (Lyude)
- refactor dpll code in prep for upfront link training (Jim Bride et al)
- consolidate all platform feature checks into intel_device_info (Carlos Santa)
- refactor elsp/execlist submission as prep for re-submission after hang
  recovery and eventually scheduling (Chris Wilson)
- allow synchronous gpu reset handling, to remove tricky/impossible/fragile
  error recovery code (Chris Wilson)
- prep work for nonblocking (execlist) submission, using fences to track
  dependencies and drive elsp submission (Chris Wilson)
- partial error recovery/resubmission of non-guilty batches after hangs (Chris Wilson)
- full dma-buf implicit fencing support (Chris Wilson)
- dp link training fixes (Jim, Dhinkaran, Navare, ...)
- obey dp branch device pixel rate/bpc/clock limits (Mika Kahola), needed for
  many vga dongles (a usage sketch follows this list)
- bunch of small cleanups and polish all over, as usual

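A minimal sketch of how a driver might honour these branch device limits
(illustrative only, not the i915 code; foo_dp and foo_connector_mode_valid()
are hypothetical, while the two drm_dp_downstream_* helpers are the real ones
added to drm_dp_helper.c in this pull):

	static enum drm_mode_status
	foo_connector_mode_valid(struct foo_dp *dp,
				 const struct drm_display_mode *mode)
	{
		/* kHz; 0 means the dongle reported no detailed cap info */
		int max_clock = drm_dp_downstream_max_clock(dp->dpcd,
							    dp->downstream_ports);

		if (max_clock && mode->clock > max_clock)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}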
[airlied: printing macros collided]

* tag 'drm-intel-next-2016-09-19' of git://anongit.freedesktop.org/drm-intel: (163 commits)
  drm/i915: Update DRIVER_DATE to 20160919
  drm: Fix DisplayPort branch device ID kernel-doc
  drm/i915: use NULL for NULL pointers
  drm/i915: do not use 'false' as a NULL pointer
  drm/i915: make intel_dp_compute_bpp static
  drm: Add DP branch device info on debugfs
  drm/i915: Update bits per component for display info
  drm/i915: Check pixel rate for DP to VGA dongle
  drm/i915: Read DP branch device SW revision
  drm/i915: Read DP branch device HW revision
  drm/i915: Cleanup DisplayPort AUX channel initialization
  drm: Read DP branch device id
  drm: Helper to read max bits per component
  drm: Helper to read max clock rate
  drm: Drop VGA from bpc definitions
  drm: Add missing DP downstream port types
  drm/i915: Add ddb size field to device info structure
  drm/i915/guc: general tidying up (submission)
  drm/i915/guc: general tidying up (loader)
  drm/i915: clarify PMINTRMSK/pm_intr_keep usage
  ...
Dave Airlie, 9 years ago
commit b81a6179b6
70 changed files with 4809 additions and 3200 deletions
  1. arch/x86/kernel/early-quirks.c  (+3, -6)
  2. drivers/gpu/drm/drm_dp_helper.c  (+173, -0)
  3. drivers/gpu/drm/i915/Makefile  (+1, -0)
  4. drivers/gpu/drm/i915/i915_cmd_parser.c  (+1, -1)
  5. drivers/gpu/drm/i915/i915_debugfs.c  (+480, -535)
  6. drivers/gpu/drm/i915/i915_drv.c  (+147, -153)
  7. drivers/gpu/drm/i915/i915_drv.h  (+115, -112)
  8. drivers/gpu/drm/i915/i915_gem.c  (+165, -156)
  9. drivers/gpu/drm/i915/i915_gem_context.c  (+0, -16)
 10. drivers/gpu/drm/i915/i915_gem_evict.c  (+6, -2)
 11. drivers/gpu/drm/i915/i915_gem_execbuffer.c  (+17, -12)
 12. drivers/gpu/drm/i915/i915_gem_gtt.c  (+70, -71)
 13. drivers/gpu/drm/i915/i915_gem_gtt.h  (+1, -5)
 14. drivers/gpu/drm/i915/i915_gem_request.c  (+195, -34)
 15. drivers/gpu/drm/i915/i915_gem_request.h  (+44, -32)
 16. drivers/gpu/drm/i915/i915_gem_shrinker.c  (+2, -2)
 17. drivers/gpu/drm/i915/i915_gem_stolen.c  (+10, -9)
 18. drivers/gpu/drm/i915/i915_gem_userptr.c  (+1, -1)
 19. drivers/gpu/drm/i915/i915_gpu_error.c  (+26, -10)
 20. drivers/gpu/drm/i915/i915_guc_reg.h  (+0, -3)
 21. drivers/gpu/drm/i915/i915_guc_submission.c  (+71, -55)
 22. drivers/gpu/drm/i915/i915_irq.c  (+62, -64)
 23. drivers/gpu/drm/i915/i915_pci.c  (+156, -134)
 24. drivers/gpu/drm/i915/i915_reg.h  (+14, -1)
 25. drivers/gpu/drm/i915/i915_suspend.c  (+4, -2)
 26. drivers/gpu/drm/i915/i915_sw_fence.c  (+362, -0)
 27. drivers/gpu/drm/i915/i915_sw_fence.h  (+65, -0)
 28. drivers/gpu/drm/i915/i915_sysfs.c  (+74, -97)
 29. drivers/gpu/drm/i915/i915_vgpu.c  (+0, -3)
 30. drivers/gpu/drm/i915/intel_audio.c  (+26, -31)
 31. drivers/gpu/drm/i915/intel_breadcrumbs.c  (+3, -0)
 32. drivers/gpu/drm/i915/intel_color.c  (+14, -10)
 33. drivers/gpu/drm/i915/intel_crt.c  (+37, -11)
 34. drivers/gpu/drm/i915/intel_csr.c  (+4, -4)
 35. drivers/gpu/drm/i915/intel_ddi.c  (+185, -91)
 36. drivers/gpu/drm/i915/intel_device_info.c  (+70, -70)
 37. drivers/gpu/drm/i915/intel_display.c  (+418, -199)
 38. drivers/gpu/drm/i915/intel_dp.c  (+200, -94)
 39. drivers/gpu/drm/i915/intel_dp_link_training.c  (+70, -69)
 40. drivers/gpu/drm/i915/intel_dp_mst.c  (+40, -57)
 41. drivers/gpu/drm/i915/intel_dpll_mgr.c  (+285, -173)
 42. drivers/gpu/drm/i915/intel_dpll_mgr.h  (+15, -0)
 43. drivers/gpu/drm/i915/intel_drv.h  (+65, -31)
 44. drivers/gpu/drm/i915/intel_dsi.c  (+25, -15)
 45. drivers/gpu/drm/i915/intel_dvo.c  (+16, -12)
 46. drivers/gpu/drm/i915/intel_engine_cs.c  (+14, -1)
 47. drivers/gpu/drm/i915/intel_fbc.c  (+2, -4)
 48. drivers/gpu/drm/i915/intel_fbdev.c  (+2, -1)
 49. drivers/gpu/drm/i915/intel_guc.h  (+3, -1)
 50. drivers/gpu/drm/i915/intel_guc_loader.c  (+56, -35)
 51. drivers/gpu/drm/i915/intel_hdmi.c  (+64, -54)
 52. drivers/gpu/drm/i915/intel_i2c.c  (+65, -69)
 53. drivers/gpu/drm/i915/intel_lrc.c  (+246, -294)
 54. drivers/gpu/drm/i915/intel_lrc.h  (+1, -4)
 55. drivers/gpu/drm/i915/intel_lvds.c  (+32, -20)
 56. drivers/gpu/drm/i915/intel_opregion.c  (+27, -0)
 57. drivers/gpu/drm/i915/intel_panel.c  (+24, -16)
 58. drivers/gpu/drm/i915/intel_pm.c  (+316, -175)
 59. drivers/gpu/drm/i915/intel_psr.c  (+7, -7)
 60. drivers/gpu/drm/i915/intel_ringbuffer.c  (+48, -30)
 61. drivers/gpu/drm/i915/intel_ringbuffer.h  (+20, -6)
 62. drivers/gpu/drm/i915/intel_runtime_pm.c  (+29, -28)
 63. drivers/gpu/drm/i915/intel_sdvo.c  (+32, -24)
 64. drivers/gpu/drm/i915/intel_sprite.c  (+14, -0)
 65. drivers/gpu/drm/i915/intel_tv.c  (+11, -4)
 66. drivers/gpu/drm/i915/intel_uncore.c  (+6, -12)
 67. include/drm/drmP.h  (+20, -3)
 68. include/drm/drm_dp_helper.h  (+17, -5)
 69. include/drm/i915_pciids.h  (+14, -24)
 70. include/uapi/drm/i915_drm.h  (+1, -0)

arch/x86/kernel/early-quirks.c  (+3, -6)

@@ -507,8 +507,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_I915GM_IDS(&gen3_early_ops),
 	INTEL_I945G_IDS(&gen3_early_ops),
 	INTEL_I945GM_IDS(&gen3_early_ops),
-	INTEL_VLV_M_IDS(&gen6_early_ops),
-	INTEL_VLV_D_IDS(&gen6_early_ops),
+	INTEL_VLV_IDS(&gen6_early_ops),
 	INTEL_PINEVIEW_IDS(&gen3_early_ops),
 	INTEL_I965G_IDS(&gen3_early_ops),
 	INTEL_G33_IDS(&gen3_early_ops),
@@ -521,10 +520,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_SNB_M_IDS(&gen6_early_ops),
 	INTEL_IVB_M_IDS(&gen6_early_ops),
 	INTEL_IVB_D_IDS(&gen6_early_ops),
-	INTEL_HSW_D_IDS(&gen6_early_ops),
-	INTEL_HSW_M_IDS(&gen6_early_ops),
-	INTEL_BDW_M_IDS(&gen8_early_ops),
-	INTEL_BDW_D_IDS(&gen8_early_ops),
+	INTEL_HSW_IDS(&gen6_early_ops),
+	INTEL_BDW_IDS(&gen8_early_ops),
 	INTEL_CHV_IDS(&chv_early_ops),
 	INTEL_SKL_IDS(&gen9_early_ops),
 	INTEL_BXT_IDS(&gen9_early_ops),

drivers/gpu/drm/drm_dp_helper.c  (+173, -0)

@@ -439,6 +439,179 @@ int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
 }
 EXPORT_SYMBOL(drm_dp_link_configure);
 
+/**
+ * drm_dp_downstream_max_clock() - extract branch device max
+ *                                 pixel rate for legacy VGA
+ *                                 converter or max TMDS clock
+ *                                 rate for others
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max clock in kHz on success or 0 if max clock not defined
+ */
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				const u8 port_cap[4])
+{
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+		DP_DETAILED_CAP_INFO_AVAILABLE;
+
+	if (!detailed_cap_info)
+		return 0;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_VGA:
+		return port_cap[1] * 8 * 1000;
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		return port_cap[1] * 2500;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_clock);
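
Worked example of the encoding above (values made up for illustration): a
DP-to-VGA converter reporting port_cap[1] == 25 advertises 25 * 8 * 1000 =
200000 kHz, i.e. a 200 MHz maximum pixel rate, while an HDMI adaptor
reporting port_cap[1] == 120 advertises 120 * 2500 = 300000 kHz, i.e. a
300 MHz maximum TMDS character clock.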
+
+/**
+ * drm_dp_downstream_max_bpc() - extract branch device max
+ *                               bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			      const u8 port_cap[4])
+{
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+		DP_DETAILED_CAP_INFO_AVAILABLE;
+	int bpc;
+
+	if (!detailed_cap_info)
+		return 0;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_VGA:
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+
+		switch (bpc) {
+		case DP_DS_8BPC:
+			return 8;
+		case DP_DS_10BPC:
+			return 10;
+		case DP_DS_12BPC:
+			return 12;
+		case DP_DS_16BPC:
+			return 16;
+		}
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
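
A sketch of how a sink-side bpc limit is typically consumed, clamping the
pipe bpp to three times the branch device's max bpc (foo_* names are
hypothetical; i915 feeds the same limit into the connector's display info
in this series via "Update bits per component for display info"):

	static int foo_dp_compute_bpp(struct foo_dp *dp, int pipe_bpp)
	{
		int max_bpc = drm_dp_downstream_max_bpc(dp->dpcd,
							dp->downstream_ports);

		/* three colour components per pixel */
		if (max_bpc > 0)
			pipe_bpp = min(pipe_bpp, 3 * max_bpc);

		return pipe_bpp;
	}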
+
+/**
+ * drm_dp_downstream_id() - identify branch device
+ * @aux: DisplayPort AUX channel
+ * @id: DisplayPort branch device id
+ *
+ * Returns the number of bytes read on success or a negative error code on failure
+ */
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
+{
+	return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+}
+EXPORT_SYMBOL(drm_dp_downstream_id);
+
+/**
+ * drm_dp_downstream_debug() - debug DP branch devices
+ * @m: pointer for debugfs file
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @aux: DisplayPort AUX channel
+ *
+ */
+void drm_dp_downstream_debug(struct seq_file *m,
+			     const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			     const u8 port_cap[4], struct drm_dp_aux *aux)
+{
+	bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+				 DP_DETAILED_CAP_INFO_AVAILABLE;
+	int clk;
+	int bpc;
+	char id[6];
+	int len;
+	uint8_t rev[2];
+	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+	bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			     DP_DWN_STRM_PORT_PRESENT;
+
+	seq_printf(m, "\tDP branch device present: %s\n",
+		   branch_device ? "yes" : "no");
+
+	if (!branch_device)
+		return;
+
+	switch (type) {
+	case DP_DS_PORT_TYPE_DP:
+		seq_puts(m, "\t\tType: DisplayPort\n");
+		break;
+	case DP_DS_PORT_TYPE_VGA:
+		seq_puts(m, "\t\tType: VGA\n");
+		break;
+	case DP_DS_PORT_TYPE_DVI:
+		seq_puts(m, "\t\tType: DVI\n");
+		break;
+	case DP_DS_PORT_TYPE_HDMI:
+		seq_puts(m, "\t\tType: HDMI\n");
+		break;
+	case DP_DS_PORT_TYPE_NON_EDID:
+		seq_puts(m, "\t\tType: others without EDID support\n");
+		break;
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		seq_puts(m, "\t\tType: DP++\n");
+		break;
+	case DP_DS_PORT_TYPE_WIRELESS:
+		seq_puts(m, "\t\tType: Wireless\n");
+		break;
+	default:
+		seq_puts(m, "\t\tType: N/A\n");
+	}
+
+	drm_dp_downstream_id(aux, id);
+	seq_printf(m, "\t\tID: %s\n", id);
+
+	len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+	if (len > 0)
+		seq_printf(m, "\t\tHW: %d.%d\n",
+			   (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
+
+	len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+	if (len > 0)
+		seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
+
+	if (detailed_cap_info) {
+		clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+
+		if (clk > 0) {
+			if (type == DP_DS_PORT_TYPE_VGA)
+				seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
+			else
+				seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+		}
+
+		bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+
+		if (bpc > 0)
+			seq_printf(m, "\t\tMax bpc: %d\n", bpc);
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_debug);
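
A sketch of a debugfs consumer (driver names hypothetical; the real i915
hookup from intel_dp_info() appears further down in this diff):

	static int foo_dp_branch_show(struct seq_file *m, void *unused)
	{
		struct foo_dp *dp = m->private;

		drm_dp_downstream_debug(m, dp->dpcd, dp->downstream_ports,
					&dp->aux);
		return 0;
	}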
+
 /*
  * I2C-over-AUX implementation
  */

drivers/gpu/drm/i915/Makefile  (+1, -0)

@@ -16,6 +16,7 @@ i915-y := i915_drv.o \
 	  i915_params.o \
 	  i915_pci.o \
 	  i915_suspend.o \
+	  i915_sw_fence.o \
 	  i915_sysfs.o \
 	  intel_csr.o \
 	  intel_device_info.o \

drivers/gpu/drm/i915/i915_cmd_parser.c  (+1, -1)

@@ -984,7 +984,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 
 	src = ERR_PTR(-ENODEV);
 	if (src_needs_clflush &&
-	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, 0, 0)) {
+	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
 		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
 		if (!IS_ERR(src)) {
 			i915_memcpy_from_wc(dst,

drivers/gpu/drm/i915/i915_debugfs.c  (+480, -535)

@@ -40,6 +40,11 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+	return to_i915(node->minor->dev);
+}
+
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
 static int
@@ -57,7 +62,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
 	node->minor = minor;
 	node->dent = ent;
-	node->info_ent = (void *) key;
+	node->info_ent = (void *)key;
 
 	mutex_lock(&minor->debugfs_lock);
 	list_add(&node->list, &minor->debugfs_list);
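
The bulk of the i915_debugfs.c churn that follows is this one mechanical
conversion, repeated per debugfs callback:

	/* before: every callback re-derived the device by hand */
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* after: one helper call; dev, where still needed, comes from dev_priv */
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;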
@@ -68,12 +73,11 @@ drm_add_fake_info_node(struct drm_minor *minor,
 
 static int i915_capabilities(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	const struct intel_device_info *info = INTEL_INFO(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	const struct intel_device_info *info = INTEL_INFO(dev_priv);
 
-	seq_printf(m, "gen: %d\n", info->gen);
-	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
 #define SEP_SEMICOLON ;
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
@@ -155,7 +159,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	seq_printf(m, "] %x %s%s%s",
 		   i915_gem_active_get_seqno(&obj->last_write,
 					     &obj->base.dev->struct_mutex),
-		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+		   i915_cache_level_str(dev_priv, obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -195,7 +199,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	}
 
 	engine = i915_gem_active_get_engine(&obj->last_write,
-					    &obj->base.dev->struct_mutex);
+					    &dev_priv->drm.struct_mutex);
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
 
@@ -221,9 +225,8 @@ static int obj_rank_by_stolen(void *priv,
 
 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
 	LIST_HEAD(stolen);
@@ -365,29 +368,29 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 static void print_context_stats(struct seq_file *m,
 				struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = &dev_priv->drm;
 	struct file_stats stats;
 	struct drm_file *file;
 
 	memset(&stats, 0, sizeof(stats));
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev->struct_mutex);
 	if (dev_priv->kernel_context)
 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-	list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+	list_for_each_entry(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *fpriv = file->driver_priv;
 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	print_file_stats(m, "[k]contexts", stats);
 }
 
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	u32 count, mapped_count, purgeable_count, dpy_count;
 	u64 size, mapped_size, purgeable_size, dpy_size;
@@ -497,9 +500,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	bool show_pin_display_only = !!data;
+	struct drm_i915_private *dev_priv = node_to_i915(node);
+	struct drm_device *dev = &dev_priv->drm;
+	bool show_pin_display_only = !!node->info_ent->data;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
 	int count, ret;
@@ -531,9 +534,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 	int ret;
 
@@ -580,7 +582,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				   intel_crtc_get_vblank_counter(crtc));
 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
-			if (INTEL_INFO(dev)->gen >= 4)
+			if (INTEL_GEN(dev_priv) >= 4)
 				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
 			else
 				addr = I915_READ(DSPADDR(crtc->plane));
@@ -601,9 +603,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 
 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *engine;
 	int total = 0;
@@ -646,9 +647,8 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
 	int ret, any;
@@ -713,41 +713,25 @@ static void i915_ring_seqno_info(struct seq_file *m,
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
 
 	for_each_engine(engine, dev_priv)
 		i915_ring_seqno_info(m, engine);
 
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
 
 
 static int i915_interrupt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
-	int ret, i, pipe;
+	int i, pipe;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_CHERRYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev_priv)) {
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 			   I915_READ(GEN8_MASTER_IRQ));
 
@@ -786,7 +770,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(GEN8_PCU_IIR));
 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 			   I915_READ(GEN8_PCU_IER));
-	} else if (INTEL_INFO(dev)->gen >= 8) {
+	} else if (INTEL_GEN(dev_priv) >= 8) {
 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 			   I915_READ(GEN8_MASTER_IRQ));
 
@@ -842,7 +826,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(GEN8_PCU_IIR));
 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 			   I915_READ(GEN8_PCU_IER));
-	} else if (IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv)) {
 		seq_printf(m, "Display IER:\t%08x\n",
 			   I915_READ(VLV_IER));
 		seq_printf(m, "Display IIR:\t%08x\n",
@@ -880,7 +864,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
 
-	} else if (!HAS_PCH_SPLIT(dev)) {
+	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
@@ -912,7 +896,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(GTIMR));
 	}
 	for_each_engine(engine, dev_priv) {
-		if (INTEL_INFO(dev)->gen >= 6) {
+		if (INTEL_GEN(dev_priv) >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s):	%08x\n",
 				   engine->name, I915_READ_IMR(engine));
@@ -920,16 +904,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		i915_ring_seqno_info(m, engine);
 	}
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int i, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -956,8 +938,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 static int i915_hws_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(node);
 	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
@@ -982,33 +963,25 @@ i915_error_state_write(struct file *filp,
 		       loff_t *ppos)
 {
 	struct i915_error_state_file_priv *error_priv = filp->private_data;
-	struct drm_device *dev = error_priv->dev;
-	int ret;
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	i915_destroy_error_state(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_destroy_error_state(error_priv->dev);
 
 	return cnt;
 }
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 	struct i915_error_state_file_priv *error_priv;
 
 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
 	if (!error_priv)
 		return -ENOMEM;
 
-	error_priv->dev = dev;
+	error_priv->dev = &dev_priv->drm;
 
-	i915_error_state_get(dev, error_priv);
+	i915_error_state_get(&dev_priv->drm, error_priv);
 
 	file->private_data = error_priv;
 
@@ -1034,7 +1007,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
 	ssize_t ret_count = 0;
 	int ret;
 
-	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+	ret = i915_error_state_buf_init(&error_str,
+					to_i915(error_priv->dev), count, *pos);
 	if (ret)
 		return ret;
 
@@ -1067,16 +1041,15 @@ static const struct file_operations i915_error_state_fops = {
 static int
 i915_next_seqno_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	int ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
 	if (ret)
 		return ret;
 
 	*val = dev_priv->next_seqno;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
 }
@@ -1084,7 +1057,8 @@ i915_next_seqno_get(void *data, u64 *val)
 static int
 i915_next_seqno_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1103,14 +1077,13 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int ret = 0;
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_GEN5(dev)) {
+	if (IS_GEN5(dev_priv)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
@@ -1120,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 freq_sts;
 
 		mutex_lock(&dev_priv->rps.hw_lock);
@@ -1147,7 +1120,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 		mutex_unlock(&dev_priv->rps.hw_lock);
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
 		u32 rp_state_cap;
@@ -1159,7 +1132,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		int max_freq;
 
 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		if (IS_BROXTON(dev)) {
+		if (IS_BROXTON(dev_priv)) {
 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
 		} else {
@@ -1175,11 +1148,11 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
-		if (IS_GEN9(dev))
+		if (IS_GEN9(dev_priv))
 			reqf >>= 23;
 		else {
 			reqf &= ~GEN6_TURBO_DISABLE;
-			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 				reqf >>= 24;
 			else
 				reqf >>= 25;
@@ -1197,9 +1170,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		if (IS_GEN9(dev))
+		if (IS_GEN9(dev_priv))
 			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 		else
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1208,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 		mutex_unlock(&dev->struct_mutex);
 
-		if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
 			pm_ier = I915_READ(GEN6_PMIER);
 			pm_imr = I915_READ(GEN6_PMIMR);
 			pm_isr = I915_READ(GEN6_PMISR);
@@ -1226,7 +1199,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
-			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+			   (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
@@ -1255,22 +1228,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
-		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
-		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
-		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+		max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
-		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+		max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 			     GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1306,9 +1279,7 @@ out:
 
 static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_engine_cs *engine;
 	u64 acthd[I915_NUM_ENGINES];
 	u32 seqno[I915_NUM_ENGINES];
@@ -1316,6 +1287,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	enum intel_engine_id id;
 	int j;
 
+	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+		seq_printf(m, "Wedged\n");
+	if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
+		seq_printf(m, "Reset in progress\n");
+	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
+		seq_printf(m, "Waiter holding struct mutex\n");
+	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
+		seq_printf(m, "struct_mutex blocked for reset\n");
+
 	if (!i915.enable_hangcheck) {
 		seq_printf(m, "Hangcheck disabled\n");
 		return 0;
@@ -1376,9 +1356,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 static int ironlake_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
 	int ret;
@@ -1444,9 +1423,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 
 static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_uncore_forcewake_domain *fw_domain;
 
 	spin_lock_irq(&dev_priv->uncore.lock);
@@ -1462,9 +1439,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 
 static int vlv_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 rpmodectl1, rcctl1, pw_status;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1502,9 +1477,8 @@ static int vlv_drpc_info(struct seq_file *m)
 
 static int gen6_drpc_info(struct seq_file *m)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
 	unsigned forcewake_count;
@@ -1534,7 +1508,7 @@ static int gen6_drpc_info(struct seq_file *m)
 
 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
 	}
@@ -1556,7 +1530,7 @@ static int gen6_drpc_info(struct seq_file *m)
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
 	seq_printf(m, "RC6 Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		seq_printf(m, "Render Well Gating Enabled: %s\n",
 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
 		seq_printf(m, "Media Well Gating Enabled: %s\n",
@@ -1590,7 +1564,7 @@ static int gen6_drpc_info(struct seq_file *m)
 
 	seq_printf(m, "Core Power Down: %s\n",
 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
-	if (INTEL_INFO(dev)->gen >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		seq_printf(m, "Render Power Well: %s\n",
 			(gen9_powergate_status &
 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
@@ -1620,12 +1594,11 @@ static int gen6_drpc_info(struct seq_file *m)
 
 static int i915_drpc_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return vlv_drpc_info(m);
-	else if (INTEL_INFO(dev)->gen >= 6)
+	else if (INTEL_GEN(dev_priv) >= 6)
 		return gen6_drpc_info(m);
 	else
 		return ironlake_drpc_info(m);
@@ -1633,9 +1606,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 
 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
 		   dev_priv->fb_tracking.busy_bits);
@@ -1648,11 +1619,9 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (!HAS_FBC(dev)) {
+	if (!HAS_FBC(dev_priv)) {
 		seq_puts(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
@@ -1666,7 +1635,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		seq_printf(m, "FBC disabled: %s\n",
 			   dev_priv->fbc.no_fbc_reason);
 
-	if (INTEL_INFO(dev_priv)->gen >= 7)
+	if (INTEL_GEN(dev_priv) >= 7)
 		seq_printf(m, "Compressing: %s\n",
 			   yesno(I915_READ(FBC_STATUS2) &
 				 FBC_COMPRESSION_MASK));
@@ -1679,10 +1648,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
-	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
 		return -ENODEV;
 
 	*val = dev_priv->fbc.false_color;
@@ -1692,11 +1660,10 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 
 static int i915_fbc_fc_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 reg;
 
-	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
 		return -ENODEV;
 
 	mutex_lock(&dev_priv->fbc.lock);
@@ -1718,11 +1685,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
 
 static int i915_ips_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (!HAS_IPS(dev)) {
+	if (!HAS_IPS(dev_priv)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -1732,7 +1697,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 	seq_printf(m, "Enabled by kernel parameter: %s\n",
 		   yesno(i915.enable_ips));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		seq_puts(m, "Currently: unknown\n");
 	} else {
 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
@@ -1748,23 +1713,21 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	bool sr_enabled = false;
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
-		 IS_I945G(dev) || IS_I945GM(dev))
+	else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-	else if (IS_I915GM(dev))
+	else if (IS_I915GM(dev_priv))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
-	else if (IS_PINEVIEW(dev))
+	else if (IS_PINEVIEW(dev_priv))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
 	intel_runtime_pm_put(dev_priv);
@@ -1777,13 +1740,12 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	unsigned long temp, chipset, gfx;
 	int ret;
 
-	if (!IS_GEN5(dev))
+	if (!IS_GEN5(dev_priv))
 		return -ENODEV;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1805,14 +1767,12 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	int ret = 0;
 	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
 
-	if (!HAS_CORE_RING_FREQ(dev)) {
+	if (!HAS_LLC(dev_priv)) {
 		seq_puts(m, "unsupported on this chipset\n");
 		return 0;
 	}
@@ -1823,7 +1783,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	if (ret)
 		goto out;
 
-	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq =
 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1843,7 +1803,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
 			   intel_gpu_freq(dev_priv, (gpu_freq *
-				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+				(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
 				 GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
@@ -1858,9 +1818,8 @@ out:
 
 static int i915_opregion(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	int ret;
 
@@ -1879,10 +1838,7 @@ out:
 
 static int i915_vbt(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 
 	if (opregion->vbt)
 		seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -1892,8 +1848,8 @@ static int i915_vbt(struct seq_file *m, void *unused)
 
 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_framebuffer *fbdev_fb = NULL;
 	struct drm_framebuffer *drm_fb;
 	int ret;
@@ -1903,8 +1859,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (to_i915(dev)->fbdev) {
-		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+	if (dev_priv->fbdev) {
+		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
 
 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 			   fbdev_fb->base.width,
@@ -1949,9 +1905,8 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 
 static int i915_context_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2043,9 +1998,8 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 
 static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2070,9 +2024,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 static int i915_execlists(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
@@ -2107,7 +2060,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
-		read_pointer = engine->next_context_status_buffer;
+		read_pointer = GEN8_CSB_READ_PTR(status_pointer);
 		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
 		if (read_pointer > write_pointer)
 			write_pointer += GEN8_CSB_ENTRIES;
@@ -2173,9 +2126,8 @@ static const char *swizzle_string(unsigned swizzle)
 
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2188,7 +2140,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
 
-	if (IS_GEN3(dev) || IS_GEN4(dev)) {
+	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
 		seq_printf(m, "DDC = 0x%08x\n",
 			   I915_READ(DCC));
 		seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2197,7 +2149,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 			   I915_READ16(C0DRB3));
 		seq_printf(m, "C1DRB3 = 0x%04x\n",
 			   I915_READ16(C1DRB3));
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
 			   I915_READ(MAD_DIMM_C0));
 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -2206,7 +2158,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 			   I915_READ(MAD_DIMM_C2));
 		seq_printf(m, "TILECTL = 0x%08x\n",
 			   I915_READ(TILECTL));
-		if (INTEL_INFO(dev)->gen >= 8)
+		if (INTEL_GEN(dev_priv) >= 8)
 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
 				   I915_READ(GAMTARBMODE));
 		else
@@ -2246,9 +2198,9 @@ static int per_file_ctx(int id, void *ptr, void *data)
 	return 0;
 }
 
-static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen8_ppgtt_info(struct seq_file *m,
+			    struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int i;
@@ -2267,9 +2219,9 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 	}
 }
 
-static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen6_ppgtt_info(struct seq_file *m,
+			    struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 
 	if (IS_GEN6(dev_priv))
@@ -2301,22 +2253,23 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_file *file;
+	int ret;
 
-	int ret = mutex_lock_interruptible(&dev->struct_mutex);
+	mutex_lock(&dev->filelist_mutex);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
-		return ret;
+		goto out_unlock;
+
 	intel_runtime_pm_get(dev_priv);
 
-	if (INTEL_INFO(dev)->gen >= 8)
-		gen8_ppgtt_info(m, dev);
-	else if (INTEL_INFO(dev)->gen >= 6)
-		gen6_ppgtt_info(m, dev);
+	if (INTEL_GEN(dev_priv) >= 8)
+		gen8_ppgtt_info(m, dev_priv);
+	else if (INTEL_GEN(dev_priv) >= 6)
+		gen6_ppgtt_info(m, dev_priv);
 
-	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
@@ -2324,19 +2277,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 		task = get_pid_task(file->pid, PIDTYPE_PID);
 		if (!task) {
 			ret = -ESRCH;
-			goto out_unlock;
+			goto out_rpm;
 		}
 		seq_printf(m, "\nproc: %s\n", task->comm);
 		put_task_struct(task);
 		idr_for_each(&file_priv->context_idr, per_file_ctx,
 			     (void *)(unsigned long)m);
 	}
-out_unlock:
-	mutex_unlock(&dev->filelist_mutex);
 
+out_rpm:
 	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
-
+out_unlock:
+	mutex_unlock(&dev->filelist_mutex);
 	return ret;
 }
 
@@ -2367,9 +2320,8 @@ static const char *rps_power_to_str(unsigned int power)
 
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_file *file;
 
 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
@@ -2437,12 +2389,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 
 static int i915_llc(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	const bool edram = INTEL_GEN(dev_priv) > 8;
 
-	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
 		   intel_uncore_edram_size(dev_priv)/1024/1024);
 
@@ -2451,8 +2401,7 @@ static int i915_llc(struct seq_file *m, void *data)
 
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	u32 tmp, i;
 
@@ -2523,9 +2472,8 @@ static void i915_guc_client_info(struct seq_file *m,
 
 static int i915_guc_info(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
 	struct intel_engine_cs *engine;
@@ -2574,9 +2522,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
 static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_i915_gem_object *obj;
 	int i = 0, pg;
 
@@ -2602,15 +2548,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 psrperf = 0;
 	u32 stat[3];
 	enum pipe pipe;
 	bool enabled = false;
 
-	if (!HAS_PSR(dev)) {
+	if (!HAS_PSR(dev_priv)) {
 		seq_puts(m, "PSR not supported\n");
 		return 0;
 	}
@@ -2627,7 +2571,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	seq_printf(m, "Re-enable work scheduled: %s\n",
 		   yesno(work_busy(&dev_priv->psr.work.work)));
 
-	if (HAS_DDI(dev))
+	if (HAS_DDI(dev_priv))
 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 	else {
 		for_each_pipe(dev_priv, pipe) {
@@ -2644,7 +2588,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
-	if (!HAS_DDI(dev))
+	if (!HAS_DDI(dev_priv))
 		for_each_pipe(dev_priv, pipe) {
 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
@@ -2656,7 +2600,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	 * VLV/CHV PSR has no kind of performance counter
 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
 	 */
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
 			EDP_PSR_PERF_CNT_MASK;
 
@@ -2670,8 +2614,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_connector *connector;
 	struct intel_dp *intel_dp = NULL;
 	int ret;
@@ -2710,13 +2654,11 @@ out:
 
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u64 power;
 	u32 units;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
@@ -2736,9 +2678,8 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 
 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	if (!HAS_RUNTIME_PM(dev_priv))
 		seq_puts(m, "Runtime power management not supported\n");
@@ -2748,22 +2689,20 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 		   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
 	seq_printf(m, "Usage count: %d\n",
-		   atomic_read(&dev->dev->power.usage_count));
+		   atomic_read(&dev_priv->drm.dev->power.usage_count));
 #else
 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
 	seq_printf(m, "PCI device power state: %s [%d]\n",
-		   pci_power_name(dev_priv->drm.pdev->current_state),
-		   dev_priv->drm.pdev->current_state);
+		   pci_power_name(pdev->current_state),
+		   pdev->current_state);
 
 	return 0;
 }
 
 static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	int i;
 
@@ -2796,12 +2735,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 
 static int i915_dmc_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_csr *csr;
 
-	if (!HAS_CSR(dev)) {
+	if (!HAS_CSR(dev_priv)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -2819,12 +2756,12 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
 		   CSR_VERSION_MINOR(csr->version));
 
-	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+	if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
 		seq_printf(m, "DC3 -> DC5 count: %d\n",
 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
 		seq_printf(m, "DC5 -> DC6 count: %d\n",
 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
-	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
 		seq_printf(m, "DC3 -> DC5 count: %d\n",
 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
 	}
@@ -2861,8 +2798,8 @@ static void intel_encoder_info(struct seq_file *m,
 			       struct intel_crtc *intel_crtc,
 			       struct intel_encoder *intel_encoder)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_connector *intel_connector;
 	struct drm_encoder *encoder;
@@ -2888,8 +2825,8 @@ static void intel_encoder_info(struct seq_file *m,
 
 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct intel_encoder *intel_encoder;
 	struct drm_plane_state *plane_state = crtc->primary->state;
@@ -2923,6 +2860,9 @@ static void intel_dp_info(struct seq_file *m,
 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_info(m, &intel_connector->panel);
+
+	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+				&intel_dp->aux);
 }
 
 static void intel_hdmi_info(struct seq_file *m,
@@ -2987,12 +2927,11 @@ static void intel_connector_info(struct seq_file *m,
 		intel_seq_print_mode(m, 2, mode);
 }
 
-static bool cursor_active(struct drm_device *dev, int pipe)
+static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 state;
 
-	if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev_priv) || IS_I865G(dev_priv))
 		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 	else
 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3000,9 +2939,9 @@ static bool cursor_active(struct drm_device *dev, int pipe)
 	return state;
 }
 
-static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+static bool cursor_position(struct drm_i915_private *dev_priv,
+			    int pipe, int *x, int *y)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pos;
 
 	pos = I915_READ(CURPOS(pipe));
@@ -3015,7 +2954,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
 		*y = -*y;
 
-	return cursor_active(dev, pipe);
+	return cursor_active(dev_priv, pipe);
 }
 
 static const char *plane_type(enum drm_plane_type type)
@@ -3058,8 +2997,8 @@ static const char *plane_rotation(unsigned int rotation)
 
 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_plane *intel_plane;
 
 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
@@ -3130,9 +3069,8 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
 
 static int i915_display_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 	struct drm_connector *connector;
 
@@ -3156,7 +3094,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		if (pipe_config->base.active) {
 			intel_crtc_info(m, crtc);
 
-			active = cursor_position(dev, crtc->pipe, &x, &y);
+			active = cursor_position(dev_priv, crtc->pipe, &x, &y);
 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
 				   x, y, crtc->base.cursor->state->crtc_w,
@@ -3185,11 +3123,10 @@ static int i915_display_info(struct seq_file *m, void *unused)
 
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
-	int num_rings = INTEL_INFO(dev)->num_rings;
+	int num_rings = INTEL_INFO(dev_priv)->num_rings;
 	enum intel_engine_id id;
 	int j, ret;
 
@@ -3203,7 +3140,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev)) {
+	if (IS_BROADWELL(dev_priv)) {
 		struct page *page;
 		uint64_t *seqno;
 
@@ -3258,9 +3195,8 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 
 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	int i;
 
 	drm_modeset_lock_all(dev);
@@ -3288,9 +3224,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	int i;
 	int ret;
 	struct intel_engine_cs *engine;
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
 	enum intel_engine_id id;
 
@@ -3326,15 +3261,14 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
 static int i915_ddb_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct skl_ddb_allocation *ddb;
 	struct skl_ddb_entry *entry;
 	enum pipe pipe;
 	int plane;
 
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	drm_modeset_lock_all(dev);
@@ -3364,7 +3298,8 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 }
 
 static void drrs_status_per_crtc(struct seq_file *m,
-		struct drm_device *dev, struct intel_crtc *intel_crtc)
+				 struct drm_device *dev,
+				 struct intel_crtc *intel_crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_drrs *drrs = &dev_priv->drrs;
@@ -3433,8 +3368,8 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
 static int i915_drrs_status(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *intel_crtc;
 	int active_crtc_cnt = 0;
 
@@ -3457,14 +3392,14 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 
 struct pipe_crc_info {
 	const char *name;
-	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
 	enum pipe pipe;
 };
 
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
 	struct drm_connector *connector;
@@ -3493,10 +3428,10 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(info->dev);
+	struct drm_i915_private *dev_priv = info->dev_priv;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
-	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+	if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
 		return -ENODEV;
 
 	spin_lock_irq(&pipe_crc->lock);
@@ -3517,7 +3452,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(info->dev);
+	struct drm_i915_private *dev_priv = info->dev_priv;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
 	spin_lock_irq(&pipe_crc->lock);
@@ -3544,8 +3479,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
 		   loff_t *pos)
 {
 	struct pipe_crc_info *info = filep->private_data;
-	struct drm_device *dev = info->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = info->dev_priv;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 	char buf[PIPE_CRC_BUFFER_LEN];
 	int n_entries;
@@ -3641,11 +3575,11 @@ static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
 				enum pipe pipe)
 {
-	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = to_i915(minor->dev);
 	struct dentry *ent;
 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
 
-	info->dev = dev;
+	info->dev_priv = dev_priv;
 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
 				  &i915_pipe_crc_fops);
 	if (!ent)
@@ -3675,8 +3609,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	int i;
 
 	for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3688,9 +3621,7 @@ static int display_crc_ctl_show(struct seq_file *m, void *data)
 
 static int display_crc_ctl_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
-
-	return single_open(file, display_crc_ctl_show, dev);
+	return single_open(file, display_crc_ctl_show, inode->i_private);
 }
 
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -3713,9 +3644,11 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+				     enum pipe pipe,
 				     enum intel_pipe_crc_source *source)
 {
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *encoder;
 	struct intel_crtc *crtc;
 	struct intel_digital_port *dig_port;
@@ -3765,16 +3698,15 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
 	return ret;
 }
 
-static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				enum pipe pipe,
 				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool need_stable_symbols = false;
 
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
 		if (ret)
 			return ret;
 	}
@@ -3792,7 +3724,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
 		need_stable_symbols = true;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_D:
-		if (!IS_CHERRYVIEW(dev))
+		if (!IS_CHERRYVIEW(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
 		need_stable_symbols = true;
@@ -3836,16 +3768,15 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
 	return 0;
 }
 
-static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				 enum pipe pipe,
 				 enum intel_pipe_crc_source *source,
 				 uint32_t *val)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool need_stable_symbols = false;
 
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
-		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
 		if (ret)
 			return ret;
 	}
@@ -3855,24 +3786,24 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_TV:
-		if (!SUPPORTS_TV(dev))
+		if (!SUPPORTS_TV(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_B:
-		if (!IS_G4X(dev))
+		if (!IS_G4X(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
 		need_stable_symbols = true;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_C:
-		if (!IS_G4X(dev))
+		if (!IS_G4X(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
 		need_stable_symbols = true;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_DP_D:
-		if (!IS_G4X(dev))
+		if (!IS_G4X(dev_priv))
 			return -EINVAL;
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
 		need_stable_symbols = true;
@@ -3896,7 +3827,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 	if (need_stable_symbols) {
 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
-		WARN_ON(!IS_G4X(dev));
+		WARN_ON(!IS_G4X(dev_priv));
 
 		I915_WRITE(PORT_DFT_I9XX,
 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
@@ -3912,10 +3843,9 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 	return 0;
 }
 
-static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
 	switch (pipe) {
@@ -3937,10 +3867,9 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 
 }
 
-static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
 	if (pipe == PIPE_A)
@@ -3981,9 +3910,10 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+					bool enable)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
 	struct intel_crtc_state *pipe_config;
@@ -4017,7 +3947,7 @@ out:
 		drm_atomic_state_free(state);
 }
 
-static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 				enum pipe pipe,
 				enum intel_pipe_crc_source *source,
 				uint32_t *val)
@@ -4033,8 +3963,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
-		if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev, true);
+		if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
 
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
@@ -4048,13 +3978,14 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
 	return 0;
 }
 
-static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+			       enum pipe pipe,
 			       enum intel_pipe_crc_source source)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
-									pipe));
+	struct intel_crtc *crtc =
+			to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
 	enum intel_display_power_domain power_domain;
 	u32 val = 0; /* shut up gcc */
 	int ret;
@@ -4072,16 +4003,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 		return -EIO;
 	}
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
-	else if (INTEL_INFO(dev)->gen < 5)
-		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
-	else if (IS_GEN5(dev) || IS_GEN6(dev))
+	else if (INTEL_GEN(dev_priv) < 5)
+		ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
 
 	if (ret != 0)
 		goto out;
@@ -4145,12 +4076,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 
 		kfree(entries);
 
-		if (IS_G4X(dev))
-			g4x_undo_pipe_scramble_reset(dev, pipe);
-		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-			vlv_undo_pipe_scramble_reset(dev, pipe);
-		else if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev, false);
+		if (IS_G4X(dev_priv))
+			g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+			vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+		else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
 
 		hsw_enable_ips(crtc);
 	}
@@ -4254,7 +4185,8 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
 	return -EINVAL;
 }
 
-static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+				 char *buf, size_t len)
 {
 #define N_WORDS 3
 	int n_words;
@@ -4285,14 +4217,14 @@ static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
 		return -EINVAL;
 	}
 
-	return pipe_crc_set_source(dev, pipe, source);
+	return pipe_crc_set_source(dev_priv, pipe, source);
 }
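
For reference, a userspace-runnable sketch of driving the control file
parsed above. The three-word format is assumed to be
"<object> <pipe> <source>", with "pipe" the only object currently
accepted and "auto" picking a source to match the attached encoder; the
debugfs path assumes DRM minor 0:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char cmd[] = "pipe A auto";
	int fd = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		return 1;
	close(fd);
	return 0;
}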
 
 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
 				     size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
 	char *tmpbuf;
 	int ret;
 
@@ -4315,7 +4247,7 @@ static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
 	}
 	tmpbuf[len] = '\0';
 
-	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+	ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
 
 out:
 	kfree(tmpbuf);
@@ -4336,8 +4268,8 @@ static const struct file_operations i915_display_crc_ctl_fops = {
 };
 
 static ssize_t i915_displayport_test_active_write(struct file *file,
-					    const char __user *ubuf,
-					    size_t len, loff_t *offp)
+						  const char __user *ubuf,
+						  size_t len, loff_t *offp)
 {
 	char *input_buffer;
 	int status = 0;
@@ -4367,7 +4299,6 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4405,7 +4336,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 	struct intel_dp *intel_dp;
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4425,11 +4355,12 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 }
 
 static int i915_displayport_test_active_open(struct inode *inode,
-				       struct file *file)
+					     struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	return single_open(file, i915_displayport_test_active_show, dev);
+	return single_open(file, i915_displayport_test_active_show,
+			   &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_active_fops = {
@@ -4449,7 +4380,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 	struct intel_dp *intel_dp;
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4465,11 +4395,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 	return 0;
 }
 static int i915_displayport_test_data_open(struct inode *inode,
-				       struct file *file)
+					   struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	return single_open(file, i915_displayport_test_data_show, dev);
+	return single_open(file, i915_displayport_test_data_show,
+			   &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_data_fops = {
@@ -4488,7 +4419,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 	struct intel_dp *intel_dp;
 
 	list_for_each_entry(connector, connector_list, head) {
-
 		if (connector->connector_type !=
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
@@ -4507,9 +4437,10 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 static int i915_displayport_test_type_open(struct inode *inode,
 				       struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	return single_open(file, i915_displayport_test_type_show, dev);
+	return single_open(file, i915_displayport_test_type_show,
+			   &dev_priv->drm);
 }
 
 static const struct file_operations i915_displayport_test_type_fops = {
@@ -4522,13 +4453,14 @@ static const struct file_operations i915_displayport_test_type_fops = {
 
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	int level;
 	int num_levels;
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		num_levels = 1;
 	else
 		num_levels = ilk_wm_max_level(dev) + 1;
@@ -4542,8 +4474,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 		 * - WM1+ latency values in 0.5us units
 		 * - latencies are in us on gen9/vlv/chv
 		 */
-		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
-		    IS_CHERRYVIEW(dev))
+		if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
+		    IS_CHERRYVIEW(dev_priv))
 			latency *= 10;
 		else if (level > 0)
 			latency *= 5;
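
The scaling above normalises every value to tenths of a microsecond
before the function prints it; a standalone illustration of the
arithmetic (the "X.Y usec" rendering later in the function is assumed
to print latency / 10 and latency % 10):

#include <stdio.h>

int main(void)
{
	unsigned int ilk_wm1 = 4 * 5;	/* 0.5 us units -> tenths */
	unsigned int skl_wm1 = 7 * 10;	/* plain us on gen9 -> tenths */

	printf("ILK WM1: %u.%u usec\n", ilk_wm1 / 10, ilk_wm1 % 10); /* 2.0 */
	printf("SKL WM1: %u.%u usec\n", skl_wm1 / 10, skl_wm1 % 10); /* 7.0 */
	return 0;
}
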
@@ -4557,14 +4489,13 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	const uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.pri_latency;
+		latencies = dev_priv->wm.pri_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -4573,14 +4504,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	const uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.spr_latency;
+		latencies = dev_priv->wm.spr_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -4589,14 +4519,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	const uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.cur_latency;
+		latencies = dev_priv->wm.cur_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -4605,48 +4534,49 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
 
 static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (INTEL_INFO(dev)->gen < 5)
+	if (INTEL_GEN(dev_priv) < 5)
 		return -ENODEV;
 
-	return single_open(file, pri_wm_latency_show, dev);
+	return single_open(file, pri_wm_latency_show, dev_priv);
 }
 
 static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		return -ENODEV;
 
-	return single_open(file, spr_wm_latency_show, dev);
+	return single_open(file, spr_wm_latency_show, dev_priv);
 }
 
 static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev_priv))
 		return -ENODEV;
 
-	return single_open(file, cur_wm_latency_show, dev);
+	return single_open(file, cur_wm_latency_show, dev_priv);
 }
 
 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 				size_t len, loff_t *offp, uint16_t wm[8])
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
 	uint16_t new[8] = { 0 };
 	int num_levels;
 	int level;
 	int ret;
 	char tmp[32];
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(dev_priv))
 		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		num_levels = 1;
 	else
 		num_levels = ilk_wm_max_level(dev) + 1;
@@ -4680,14 +4610,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 				    size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.pri_latency;
+		latencies = dev_priv->wm.pri_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4696,14 +4625,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 				    size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.spr_latency;
+		latencies = dev_priv->wm.spr_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4712,14 +4640,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 				    size_t len, loff_t *offp)
 {
 	struct seq_file *m = file->private_data;
-	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = m->private;
 	uint16_t *latencies;
 
-	if (INTEL_INFO(dev)->gen >= 9)
+	if (INTEL_GEN(dev_priv) >= 9)
 		latencies = dev_priv->wm.skl_latency;
 	else
-		latencies = to_i915(dev)->wm.cur_latency;
+		latencies = dev_priv->wm.cur_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -4754,8 +4681,7 @@ static const struct file_operations i915_cur_wm_latency_fops = {
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
 
@@ -4765,8 +4691,7 @@ i915_wedged_get(void *data, u64 *val)
 static int
 i915_wedged_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	/*
 	 * There is no safeguard against this debugfs entry colliding
@@ -4796,8 +4721,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
 static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	*val = dev_priv->gpu_error.missed_irq_rings;
 	return 0;
@@ -4806,8 +4730,8 @@ i915_ring_missed_irq_get(void *data, u64 *val)
 static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	/* Lock against concurrent debugfs callers */
@@ -4827,8 +4751,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
 static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	*val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4838,8 +4761,7 @@ i915_ring_test_irq_get(void *data, u64 *val)
 static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	val &= INTEL_INFO(dev_priv)->ring_mask;
 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
@@ -4871,8 +4793,8 @@ i915_drop_caches_get(void *data, u64 *val)
 static int
 i915_drop_caches_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4884,7 +4806,9 @@ i915_drop_caches_set(void *data, u64 val)
 		return ret;
 
 	if (val & DROP_ACTIVE) {
-		ret = i915_gem_wait_for_idle(dev_priv, true);
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
 		if (ret)
 			goto unlock;
 	}
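
i915_gem_wait_for_idle() now takes a flags word rather than a bare
interruptible bool. A hedged sketch of the flag bits assumed by this
call (the real definitions land with the request rework in
i915_gem_request.h; BIT() is from linux/bitops.h):

/* Assumed definitions; I915_WAIT_LOCKED tells the wait path that
 * struct_mutex is held, so it must also cope with a GPU reset.
 */
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1)
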
@@ -4911,10 +4835,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
 static int
 i915_max_freq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
@@ -4924,12 +4847,11 @@ i915_max_freq_get(void *data, u64 *val)
 static int
 i915_max_freq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 hw_max, hw_min;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
@@ -4967,8 +4889,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
 static int
 i915_min_freq_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 
 	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
@@ -4980,8 +4901,7 @@ i915_min_freq_get(void *data, u64 *val)
 static int
 i915_min_freq_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -5002,7 +4922,8 @@ i915_min_freq_set(void *data, u64 val)
 	hw_max = dev_priv->rps.max_freq;
 	hw_min = dev_priv->rps.min_freq;
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+	if (val < hw_min ||
+	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
@@ -5023,12 +4944,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
 static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 snpcr;
 	int ret;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -5039,7 +4960,7 @@ i915_cache_sharing_get(void *data, u64 *val)
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5049,11 +4970,10 @@ i915_cache_sharing_get(void *data, u64 *val)
 static int
 i915_cache_sharing_set(void *data, u64 val)
 {
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = data;
 	u32 snpcr;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;
 
 	if (val > 3)
@@ -5076,18 +4996,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-struct sseu_dev_status {
-	unsigned int slice_total;
-	unsigned int subslice_total;
-	unsigned int subslice_per_slice;
-	unsigned int eu_total;
-	unsigned int eu_per_subslice;
-};
-
-static void cherryview_sseu_device_status(struct drm_device *dev,
-					  struct sseu_dev_status *stat)
+static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
+					  struct sseu_dev_info *sseu)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ss_max = 2;
 	int ss;
 	u32 sig1[ss_max], sig2[ss_max];
@@ -5104,28 +5015,27 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
 			/* skip disabled subslice */
 			continue;
 
-		stat->slice_total = 1;
-		stat->subslice_per_slice++;
+		sseu->slice_mask = BIT(0);
+		sseu->subslice_mask |= BIT(ss);
 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
-		stat->eu_total += eu_cnt;
-		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+		sseu->eu_total += eu_cnt;
+		sseu->eu_per_subslice = max_t(unsigned int,
+					      sseu->eu_per_subslice, eu_cnt);
 	}
-	stat->subslice_total = stat->subslice_per_slice;
 }
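
The removed running totals are now derived from the masks on demand. A
hedged sketch of the sseu_subslice_total() helper this diff leans on
(definition assumed from the call sites; the real one lands with the
sseu refactor elsewhere in the series):

static inline unsigned int
sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}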
 
-static void gen9_sseu_device_status(struct drm_device *dev,
-				    struct sseu_dev_status *stat)
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+				    struct sseu_dev_info *sseu)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int s_max = 3, ss_max = 4;
 	int s, ss;
 	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
 
 	/* BXT has a single slice and at most 3 subslices. */
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		s_max = 1;
 		ss_max = 3;
 	}
@@ -5146,133 +5056,134 @@ static void gen9_sseu_device_status(struct drm_device *dev,
 		     GEN9_PGCTL_SSB_EU311_ACK;
 
 	for (s = 0; s < s_max; s++) {
-		unsigned int ss_cnt = 0;
-
 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
 			/* skip disabled slice */
 			continue;
 
-		stat->slice_total++;
+		sseu->slice_mask |= BIT(s);
 
-		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+			sseu->subslice_mask =
+				INTEL_INFO(dev_priv)->sseu.subslice_mask;
 
 		for (ss = 0; ss < ss_max; ss++) {
 			unsigned int eu_cnt;
 
-			if (IS_BROXTON(dev) &&
-			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
-				/* skip disabled subslice */
-				continue;
+			if (IS_BROXTON(dev_priv)) {
+				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+					/* skip disabled subslice */
+					continue;
 
-			if (IS_BROXTON(dev))
-				ss_cnt++;
+				sseu->subslice_mask |= BIT(ss);
+			}
 
 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
 					       eu_mask[ss%2]);
-			stat->eu_total += eu_cnt;
-			stat->eu_per_subslice = max(stat->eu_per_subslice,
-						    eu_cnt);
+			sseu->eu_total += eu_cnt;
+			sseu->eu_per_subslice = max_t(unsigned int,
+						      sseu->eu_per_subslice,
+						      eu_cnt);
 		}
-
-		stat->subslice_total += ss_cnt;
-		stat->subslice_per_slice = max(stat->subslice_per_slice,
-					       ss_cnt);
 	}
 }
 
-static void broadwell_sseu_device_status(struct drm_device *dev,
-					 struct sseu_dev_status *stat)
+static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
+					 struct sseu_dev_info *sseu)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int s;
 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+	int s;
 
-	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
 
-	if (stat->slice_total) {
-		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
-		stat->subslice_total = stat->slice_total *
-				       stat->subslice_per_slice;
-		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
-		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+	if (sseu->slice_mask) {
+		sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+		sseu->eu_per_subslice =
+				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+		sseu->eu_total = sseu->eu_per_subslice *
+				 sseu_subslice_total(sseu);
 
 		/* subtract fused off EU(s) from enabled slice(s) */
-		for (s = 0; s < stat->slice_total; s++) {
-			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+		for (s = 0; s < fls(sseu->slice_mask); s++) {
+			u8 subslice_7eu =
+				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
 
-			stat->eu_total -= hweight8(subslice_7eu);
+			sseu->eu_total -= hweight8(subslice_7eu);
 		}
 	}
 }
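
fls() bounds the fuse-off loop by the highest set bit of slice_mask,
while hweight8() counts the enabled slices. A userspace-runnable
illustration of the same bit tricks (fls() and hweight8() are
kernel-only; GCC builtins stand in for them here):

#include <stdio.h>

static int fls_user(unsigned int v)	/* stand-in for the kernel fls() */
{
	return v ? 32 - __builtin_clz(v) : 0;
}

int main(void)
{
	unsigned int slice_mask = 0x3;	/* two slices enabled */

	/* loop bound 2 visits slices 0 and 1; popcount counts them */
	printf("loop bound: %d, slices: %d\n",
	       fls_user(slice_mask), __builtin_popcount(slice_mask));
	return 0;
}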
 
-static int i915_sseu_status(struct seq_file *m, void *unused)
+static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
+				 const struct sseu_dev_info *sseu)
 {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
-	struct drm_device *dev = &dev_priv->drm;
-	struct sseu_dev_status stat;
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	const char *type = is_available_info ? "Available" : "Enabled";
 
-	if (INTEL_INFO(dev)->gen < 8)
-		return -ENODEV;
+	seq_printf(m, "  %s Slice Mask: %04x\n", type,
+		   sseu->slice_mask);
+	seq_printf(m, "  %s Slice Total: %u\n", type,
+		   hweight8(sseu->slice_mask));
+	seq_printf(m, "  %s Subslice Total: %u\n", type,
+		   sseu_subslice_total(sseu));
+	seq_printf(m, "  %s Subslice Mask: %04x\n", type,
+		   sseu->subslice_mask);
+	seq_printf(m, "  %s Subslice Per Slice: %u\n", type,
+		   hweight8(sseu->subslice_mask));
+	seq_printf(m, "  %s EU Total: %u\n", type,
+		   sseu->eu_total);
+	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
+		   sseu->eu_per_subslice);
+
+	if (!is_available_info)
+		return;
+
+	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
+	if (HAS_POOLED_EU(dev_priv))
+		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
 
-	seq_puts(m, "SSEU Device Info\n");
-	seq_printf(m, "  Available Slice Total: %u\n",
-		   INTEL_INFO(dev)->slice_total);
-	seq_printf(m, "  Available Subslice Total: %u\n",
-		   INTEL_INFO(dev)->subslice_total);
-	seq_printf(m, "  Available Subslice Per Slice: %u\n",
-		   INTEL_INFO(dev)->subslice_per_slice);
-	seq_printf(m, "  Available EU Total: %u\n",
-		   INTEL_INFO(dev)->eu_total);
-	seq_printf(m, "  Available EU Per Subslice: %u\n",
-		   INTEL_INFO(dev)->eu_per_subslice);
-	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
-	if (HAS_POOLED_EU(dev))
-		seq_printf(m, "  Min EU in pool: %u\n",
-			   INTEL_INFO(dev)->min_eu_in_pool);
 	seq_printf(m, "  Has Slice Power Gating: %s\n",
-		   yesno(INTEL_INFO(dev)->has_slice_pg));
+		   yesno(sseu->has_slice_pg));
 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
-		   yesno(INTEL_INFO(dev)->has_subslice_pg));
+		   yesno(sseu->has_subslice_pg));
 	seq_printf(m, "  Has EU Power Gating: %s\n",
-		   yesno(INTEL_INFO(dev)->has_eu_pg));
+		   yesno(sseu->has_eu_pg));
+}
+
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct sseu_dev_info sseu;
+
+	if (INTEL_GEN(dev_priv) < 8)
+		return -ENODEV;
+
+	seq_puts(m, "SSEU Device Info\n");
+	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
 
 	seq_puts(m, "SSEU Device Status\n");
-	memset(&stat, 0, sizeof(stat));
+	memset(&sseu, 0, sizeof(sseu));
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_CHERRYVIEW(dev)) {
-		cherryview_sseu_device_status(dev, &stat);
-	} else if (IS_BROADWELL(dev)) {
-		broadwell_sseu_device_status(dev, &stat);
-	} else if (INTEL_INFO(dev)->gen >= 9) {
-		gen9_sseu_device_status(dev, &stat);
+	if (IS_CHERRYVIEW(dev_priv)) {
+		cherryview_sseu_device_status(dev_priv, &sseu);
+	} else if (IS_BROADWELL(dev_priv)) {
+		broadwell_sseu_device_status(dev_priv, &sseu);
+	} else if (INTEL_GEN(dev_priv) >= 9) {
+		gen9_sseu_device_status(dev_priv, &sseu);
 	}
 
 	intel_runtime_pm_put(dev_priv);
 
-	seq_printf(m, "  Enabled Slice Total: %u\n",
-		   stat.slice_total);
-	seq_printf(m, "  Enabled Subslice Total: %u\n",
-		   stat.subslice_total);
-	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
-		   stat.subslice_per_slice);
-	seq_printf(m, "  Enabled EU Total: %u\n",
-		   stat.eu_total);
-	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
-		   stat.eu_per_subslice);
+	i915_print_sseu_info(m, false, &sseu);
 
 	return 0;
 }
 
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -5283,10 +5194,9 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
-	struct drm_device *dev = inode->i_private;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return 0;
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
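
As a usage note: holding this file open pins forcewake, keeping the GT
awake while registers are inspected from other tools. A hedged sketch,
assuming DRM minor 0:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_forcewake_user",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* ... inspect registers while fd stays open ... */
	close(fd);	/* drops the forcewake reference */
	return 0;
}
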
@@ -5303,12 +5213,11 @@ static const struct file_operations i915_forcewake_fops = {
 
 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
 {
-	struct drm_device *dev = minor->dev;
 	struct dentry *ent;
 
 	ent = debugfs_create_file("i915_forcewake_user",
 				  S_IRUSR,
-				  root, dev,
+				  root, to_i915(minor->dev),
 				  &i915_forcewake_fops);
 	if (!ent)
 		return -ENOMEM;
@@ -5321,12 +5230,11 @@ static int i915_debugfs_create(struct dentry *root,
 			       const char *name,
 			       const struct file_operations *fops)
 {
-	struct drm_device *dev = minor->dev;
 	struct dentry *ent;
 
 	ent = debugfs_create_file(name,
 				  S_IRUGO | S_IWUSR,
-				  root, dev,
+				  root, to_i915(minor->dev),
 				  fops);
 	if (!ent)
 		return -ENOMEM;
@@ -5413,9 +5321,8 @@ static const struct i915_debugfs_files {
 	{"i915_dp_test_active", &i915_displayport_test_active_fops}
 };
 
-void intel_display_crc_init(struct drm_device *dev)
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
@@ -5463,7 +5370,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
 
-	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+	drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
 				 1, minor);
 
 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
@@ -5475,7 +5382,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		struct drm_info_list *info_list =
-			(struct drm_info_list *) i915_debugfs_files[i].fops;
+			(struct drm_info_list *)i915_debugfs_files[i].fops;
 
 		drm_debugfs_remove_files(info_list, 1, minor);
 	}
@@ -5555,6 +5462,40 @@ static const struct file_operations i915_dpcd_fops = {
 	.release = single_release,
 };
 
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_dp *intel_dp =
+		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	seq_printf(m, "Panel power up delay: %d\n",
+		   intel_dp->panel_power_up_delay);
+	seq_printf(m, "Panel power down delay: %d\n",
+		   intel_dp->panel_power_down_delay);
+	seq_printf(m, "Backlight on delay: %d\n",
+		   intel_dp->backlight_on_delay);
+	seq_printf(m, "Backlight off delay: %d\n",
+		   intel_dp->backlight_off_delay);
+
+	return 0;
+}
+
+static int i915_panel_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_panel_show, inode->i_private);
+}
+
+static const struct file_operations i915_panel_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_panel_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 /**
  * i915_debugfs_connector_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -5574,8 +5515,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
 
 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
-				    &i915_dpcd_fops);
+		debugfs_create_file("i915_dpcd", S_IRUGO, root,
+				    connector, &i915_dpcd_fops);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+				    connector, &i915_panel_fops);
 
 	return 0;
 }

drivers/gpu/drm/i915/i915_drv.c (+147, -153)

@@ -77,7 +77,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 	      const char *fmt, ...)
 {
 	static bool shown_bug_once;
-	struct device *dev = dev_priv->drm.dev;
+	struct device *kdev = dev_priv->drm.dev;
 	bool is_error = level[1] <= KERN_ERR[1];
 	bool is_debug = level[1] == KERN_DEBUG[1];
 	struct va_format vaf;
@@ -91,11 +91,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
 		   __builtin_return_address(0), &vaf);
 
 	if (is_error && !shown_bug_once) {
-		dev_notice(dev, "%s", FDO_BUG_MSG);
+		dev_notice(kdev, "%s", FDO_BUG_MSG);
 		shown_bug_once = true;
 	}
 
@@ -232,6 +232,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	drm_i915_getparam_t *param = data;
 	int value;
 
@@ -242,13 +243,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		/* Reject all old ums/dri params. */
 		return -ENODEV;
 	case I915_PARAM_CHIPSET_ID:
-		value = dev->pdev->device;
+		value = pdev->device;
 		break;
 	case I915_PARAM_REVISION:
-		value = dev->pdev->revision;
-		break;
-	case I915_PARAM_HAS_GEM:
-		value = 1;
+		value = pdev->revision;
 		break;
 	case I915_PARAM_NUM_FENCES_AVAIL:
 		value = dev_priv->num_fence_regs;
@@ -256,13 +254,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_OVERLAY:
 		value = dev_priv->overlay ? 1 : 0;
 		break;
-	case I915_PARAM_HAS_PAGEFLIPPING:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_EXECBUF2:
-		/* depends on GEM */
-		value = 1;
-		break;
 	case I915_PARAM_HAS_BSD:
 		value = intel_engine_initialized(&dev_priv->engine[VCS]);
 		break;
@@ -275,67 +266,34 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BSD2:
 		value = intel_engine_initialized(&dev_priv->engine[VCS2]);
 		break;
-	case I915_PARAM_HAS_RELAXED_FENCING:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_COHERENT_RINGS:
-		value = 1;
-		break;
 	case I915_PARAM_HAS_EXEC_CONSTANTS:
-		value = INTEL_INFO(dev)->gen >= 4;
-		break;
-	case I915_PARAM_HAS_RELAXED_DELTA:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_GEN7_SOL_RESET:
-		value = 1;
+		value = INTEL_GEN(dev_priv) >= 4;
 		break;
 	case I915_PARAM_HAS_LLC:
-		value = HAS_LLC(dev);
+		value = HAS_LLC(dev_priv);
 		break;
 	case I915_PARAM_HAS_WT:
-		value = HAS_WT(dev);
+		value = HAS_WT(dev_priv);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = USES_PPGTT(dev);
-		break;
-	case I915_PARAM_HAS_WAIT_TIMEOUT:
-		value = 1;
+		value = USES_PPGTT(dev_priv);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
 		value = i915.semaphores;
 		break;
-	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-		value = 1;
-		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
 		break;
-	case I915_PARAM_HAS_PINNED_BATCHES:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_EXEC_NO_RELOC:
-		value = 1;
-		break;
-	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
-		value = 1;
-		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
 		value = i915_cmd_parser_get_version(dev_priv);
 		break;
-	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
-		value = 1;
-		break;
-	case I915_PARAM_MMAP_VERSION:
-		value = 1;
-		break;
 	case I915_PARAM_SUBSLICE_TOTAL:
-		value = INTEL_INFO(dev)->subslice_total;
+		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
 		if (!value)
 			return -ENODEV;
 		break;
 	case I915_PARAM_EU_TOTAL:
-		value = INTEL_INFO(dev)->eu_total;
+		value = INTEL_INFO(dev_priv)->sseu.eu_total;
 		if (!value)
 			return -ENODEV;
 		break;
@@ -343,16 +301,43 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
 		break;
 	case I915_PARAM_HAS_RESOURCE_STREAMER:
-		value = HAS_RESOURCE_STREAMER(dev);
-		break;
-	case I915_PARAM_HAS_EXEC_SOFTPIN:
-		value = 1;
+		value = HAS_RESOURCE_STREAMER(dev_priv);
 		break;
 	case I915_PARAM_HAS_POOLED_EU:
-		value = HAS_POOLED_EU(dev);
+		value = HAS_POOLED_EU(dev_priv);
 		break;
 	case I915_PARAM_MIN_EU_IN_POOL:
-		value = INTEL_INFO(dev)->min_eu_in_pool;
+		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+		break;
+	case I915_PARAM_MMAP_GTT_VERSION:
+		/* Though we've started our numbering from 1, and so class all
+		 * earlier versions as 0, in effect their value is undefined as
+		 * the ioctl will report EINVAL for the unknown param!
+		 */
+		value = i915_gem_mmap_gtt_version();
+		break;
+	case I915_PARAM_MMAP_VERSION:
+		/* Remember to bump this if the version changes! */
+	case I915_PARAM_HAS_GEM:
+	case I915_PARAM_HAS_PAGEFLIPPING:
+	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+	case I915_PARAM_HAS_RELAXED_FENCING:
+	case I915_PARAM_HAS_COHERENT_RINGS:
+	case I915_PARAM_HAS_RELAXED_DELTA:
+	case I915_PARAM_HAS_GEN7_SOL_RESET:
+	case I915_PARAM_HAS_WAIT_TIMEOUT:
+	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+	case I915_PARAM_HAS_PINNED_BATCHES:
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+	case I915_PARAM_HAS_EXEC_SOFTPIN:
+		/* For the time being all of these are always true;
+		 * if some supported hardware does not have one of these
+		 * features this value needs to be provided from
+		 * INTEL_INFO(), a feature macro, or similar.
+		 */
+		value = 1;
 		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
@@ -516,7 +501,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 		pr_info("switched on\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
-		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_set_power_state(pdev, PCI_D0);
 		i915_resume_switcheroo(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
@@ -574,7 +559,6 @@ static void i915_gem_fini(struct drm_device *dev)
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_reset(dev);
 	i915_gem_cleanup_engines(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -585,6 +569,7 @@ static void i915_gem_fini(struct drm_device *dev)
 static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int ret;
 
 	if (i915_inject_load_failure())
@@ -601,13 +586,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+	ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
 	if (ret && ret != -ENODEV)
 		goto out;
 
 	intel_register_dsm_handler();
 
-	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
 	if (ret)
 		goto cleanup_vga_client;
 
@@ -659,9 +644,9 @@ cleanup_irq:
 cleanup_csr:
 	intel_csr_ucode_fini(dev_priv);
 	intel_power_domains_fini(dev_priv);
-	vga_switcheroo_unregister_client(dev->pdev);
+	vga_switcheroo_unregister_client(pdev);
 cleanup_vga_client:
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
+	vga_client_register(pdev, NULL, NULL, NULL);
 out:
 	return ret;
 }
@@ -849,7 +834,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_init_audio_hooks(dev_priv);
 	i915_gem_load_init(&dev_priv->drm);
 
-	intel_display_crc_init(&dev_priv->drm);
+	intel_display_crc_init(dev_priv);
 
 	intel_device_info_dump(dev_priv);
 
@@ -881,6 +866,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 static int i915_mmio_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int mmio_bar;
 	int mmio_size;
 
@@ -897,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
 		mmio_size = 512 * 1024;
 	else
 		mmio_size = 2 * 1024 * 1024;
-	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
 	if (dev_priv->regs == NULL) {
 		DRM_ERROR("failed to map registers\n");
 
@@ -913,9 +899,10 @@ static int i915_mmio_setup(struct drm_device *dev)
 static void i915_mmio_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	intel_teardown_mchbar(dev);
-	pci_iounmap(dev->pdev, dev_priv->regs);
+	pci_iounmap(pdev, dev_priv->regs);
 }
 
 /**
@@ -994,6 +981,7 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
  */
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
@@ -1032,11 +1020,11 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		goto out_ggtt;
 	}
 
-	pci_set_master(dev->pdev);
+	pci_set_master(pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
 	if (IS_GEN2(dev)) {
-		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
 
@@ -1053,7 +1041,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * which also needs to be handled carefully.
 	 */
 	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
-		ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 		if (ret) {
 			DRM_ERROR("failed to set DMA mask\n");
@@ -1083,7 +1071,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * stuck interrupts on some machines.
 	 */
 	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
-		if (pci_enable_msi(dev->pdev) < 0)
+		if (pci_enable_msi(pdev) < 0)
 			DRM_DEBUG_DRIVER("can't enable MSI");
 	}
 
@@ -1101,10 +1089,10 @@ out_ggtt:
  */
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
-	if (dev->pdev->msi_enabled)
-		pci_disable_msi(dev->pdev);
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
 
 	pm_qos_remove_request(&dev_priv->pm_qos);
 	i915_ggtt_cleanup_hw(dev_priv);
@@ -1133,7 +1121,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	/* Reveal our presence to userspace */
 	if (drm_dev_register(dev, 0) == 0) {
 		i915_debugfs_register(dev_priv);
-		i915_setup_sysfs(dev);
+		i915_setup_sysfs(dev_priv);
 	} else
 		DRM_ERROR("Failed to register driver for userspace access!\n");
 
@@ -1170,7 +1158,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);
 
-	i915_teardown_sysfs(&dev_priv->drm);
+	i915_teardown_sysfs(dev_priv);
 	i915_debugfs_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);
 
@@ -1250,6 +1238,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	intel_runtime_pm_enable(dev_priv);
 
+	/* Everything is in place, we can now relax! */
+	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+		 driver.name, driver.major, driver.minor, driver.patchlevel,
+		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
@@ -1274,6 +1267,7 @@ out_free_priv:
 void i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	intel_fbdev_fini(dev);
 
@@ -1302,8 +1296,8 @@ void i915_driver_unload(struct drm_device *dev)
 	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
 	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
-	vga_switcheroo_unregister_client(dev->pdev);
-	vga_client_register(dev->pdev, NULL, NULL, NULL);
+	vga_switcheroo_unregister_client(pdev);
+	vga_client_register(pdev, NULL, NULL, NULL);
 
 	intel_csr_ucode_fini(dev_priv);
 
@@ -1400,6 +1394,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	pci_power_t opregion_target_state;
 	int error;
 
@@ -1416,11 +1411,11 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	drm_kms_helper_poll_disable(dev);
 
-	pci_save_state(dev->pdev);
+	pci_save_state(pdev);
 
 	error = i915_gem_suspend(dev);
 	if (error) {
-		dev_err(&dev->pdev->dev,
+		dev_err(&pdev->dev,
 			"GEM idle failed, resume might fail\n");
 		goto out;
 	}
@@ -1462,9 +1457,10 @@ out:
 	return error;
 }
 
-static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 {
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	bool fw_csr;
 	int ret;
 
@@ -1498,7 +1494,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 		goto out;
 	}
 
-	pci_disable_device(drm_dev->pdev);
+	pci_disable_device(pdev);
 	/*
 	 * During hibernation on some platforms the BIOS may try to access
 	 * the device even though it's already in D3 and hang the machine. So
@@ -1512,7 +1508,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 	 * Acer Aspire 1830T
 	 */
 	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
-		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+		pci_set_power_state(pdev, PCI_D3hot);
 
 	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
 
@@ -1552,6 +1548,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	int ret;
 
 	disable_rpm_wakeref_asserts(dev_priv);
+	intel_sanitize_gt_powersave(dev_priv);
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret)
@@ -1581,7 +1578,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		i915_gem_set_wedged(dev_priv);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1629,6 +1626,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int ret;
 
 	/*
@@ -1651,7 +1649,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	 * the device powered we can also remove the following set power state
 	 * call.
 	 */
-	ret = pci_set_power_state(dev->pdev, PCI_D0);
+	ret = pci_set_power_state(pdev, PCI_D0);
 	if (ret) {
 		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
 		goto out;
@@ -1670,12 +1668,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	 * depend on the device enable refcount we can't anyway depend on them
 	 * disabling/enabling the device.
 	 */
-	if (pci_enable_device(dev->pdev)) {
+	if (pci_enable_device(pdev)) {
 		ret = -EIO;
 		goto out;
 	}
 
-	pci_set_master(dev->pdev);
+	pci_set_master(pdev);
 
 	disable_rpm_wakeref_asserts(dev_priv);
 
@@ -1727,8 +1725,10 @@ int i915_resume_switcheroo(struct drm_device *dev)
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
  *
- * Reset the chip.  Useful if a hang is detected. Returns zero on successful
- * reset or otherwise an error code.
+ * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
  *
  * Procedure is fairly simple:
  *   - reset the chip using the reset reg
@@ -1738,29 +1738,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
-	unsigned reset_counter;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	lockdep_assert_held(&dev->struct_mutex);
 
-	/* Clear any previous failed attempts at recovery. Time to try again. */
-	atomic_andnot(I915_WEDGED, &error->reset_counter);
+	if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+		return;
 
-	/* Clear the reset-in-progress flag and increment the reset epoch. */
-	reset_counter = atomic_inc_return(&error->reset_counter);
-	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
-		ret = -EIO;
-		goto error;
-	}
+	/* Clear any previous failed attempts at recovery. Time to try again. */
+	__clear_bit(I915_WEDGED, &error->flags);
+	error->reset_count++;
 
 	pr_notice("drm/i915: Resetting chip after gpu hang\n");
-
-	i915_gem_reset(dev);
-
 	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
 	if (ret) {
 		if (ret != -ENODEV)
@@ -1770,6 +1763,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
 		goto error;
 	}
 
+	i915_gem_reset(dev_priv);
 	intel_overlay_reset(dev_priv);
 
 	/* Ok, now get things going again... */
@@ -1792,43 +1786,43 @@ int i915_reset(struct drm_i915_private *dev_priv)
 		goto error;
 	}
 
-	mutex_unlock(&dev->struct_mutex);
-
 	/*
 	 * rps/rc6 re-init is necessary to restore state lost after the
 	 * reset and the re-install of gt irqs. Skip for ironlake per
 	 * previous concerns that it doesn't respond well to some forms
 	 * of re-init after reset.
 	 */
+	intel_sanitize_gt_powersave(dev_priv);
 	intel_autoenable_gt_powersave(dev_priv);
 
-	return 0;
+wakeup:
+	wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+	return;
 
 error:
-	atomic_or(I915_WEDGED, &error->reset_counter);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	i915_gem_set_wedged(dev_priv);
+	goto wakeup;
 }
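
The signature change above inverts the locking contract: i915_reset() no longer takes struct_mutex itself, it asserts that the caller already holds it, and it reports failure by wedging the GPU rather than returning an error code. A minimal sketch of a calling site under the new contract (the locking shown is an assumption drawn from the lockdep_assert_held() above, not a verbatim quote of the error-handling path):

    /* Hypothetical caller; the real entry point is the hangcheck/error path. */
    if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags)) {
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_reset(dev_priv);	/* wedges on failure, then wakes waiters */
        mutex_unlock(&dev_priv->drm.struct_mutex);
    }
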
 
-static int i915_pm_suspend(struct device *dev)
+static int i915_pm_suspend(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
 
-	if (!drm_dev) {
-		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+	if (!dev) {
+		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
 		return -ENODEV;
 	}
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend(drm_dev);
+	return i915_drm_suspend(dev);
 }
 
-static int i915_pm_suspend_late(struct device *dev)
+static int i915_pm_suspend_late(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
 	/*
 	 * We have a suspend ordering issue with the snd-hda driver also
@@ -1839,57 +1833,57 @@ static int i915_pm_suspend_late(struct device *dev)
 	 * FIXME: This should be solved with a special hdmi sink device or
 	 * similar so that power domains can be employed.
 	 */
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev, false);
+	return i915_drm_suspend_late(dev, false);
 }
 
-static int i915_pm_poweroff_late(struct device *dev)
+static int i915_pm_poweroff_late(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_suspend_late(drm_dev, true);
+	return i915_drm_suspend_late(dev, true);
 }
 
-static int i915_pm_resume_early(struct device *dev)
+static int i915_pm_resume_early(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume_early(drm_dev);
+	return i915_drm_resume_early(dev);
 }
 
-static int i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *kdev)
 {
-	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+	struct drm_device *dev = &kdev_to_i915(kdev)->drm;
 
-	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	return i915_drm_resume(drm_dev);
+	return i915_drm_resume(dev);
 }
 
 /* freeze: before creating the hibernation_image */
-static int i915_pm_freeze(struct device *dev)
+static int i915_pm_freeze(struct device *kdev)
 {
-	return i915_pm_suspend(dev);
+	return i915_pm_suspend(kdev);
 }
 
-static int i915_pm_freeze_late(struct device *dev)
+static int i915_pm_freeze_late(struct device *kdev)
 {
 	int ret;
 
-	ret = i915_pm_suspend_late(dev);
+	ret = i915_pm_suspend_late(kdev);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_freeze_late(dev_to_i915(dev));
+	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
 	if (ret)
 		return ret;
 
@@ -1897,25 +1891,25 @@ static int i915_pm_freeze_late(struct device *dev)
 }
 
 /* thaw: called after creating the hibernation image, but before turning off. */
-static int i915_pm_thaw_early(struct device *dev)
+static int i915_pm_thaw_early(struct device *kdev)
 {
-	return i915_pm_resume_early(dev);
+	return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_thaw(struct device *kdev)
 {
-	return i915_pm_resume(dev);
+	return i915_pm_resume(kdev);
 }
 
 /* restore: called after loading the hibernation image. */
-static int i915_pm_restore_early(struct device *dev)
+static int i915_pm_restore_early(struct device *kdev)
 {
-	return i915_pm_resume_early(dev);
+	return i915_pm_resume_early(kdev);
 }
 
-static int i915_pm_restore(struct device *dev)
+static int i915_pm_restore(struct device *kdev)
 {
-	return i915_pm_resume(dev);
+	return i915_pm_resume(kdev);
 }
 
 /*
@@ -2277,9 +2271,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
-static int intel_runtime_suspend(struct device *device)
+static int intel_runtime_suspend(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
+	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
@@ -2305,7 +2299,7 @@ static int intel_runtime_suspend(struct device *device)
 		 * Bump the expiration timestamp, otherwise the suspend won't
 		 * be rescheduled.
 		 */
-		pm_runtime_mark_last_busy(device);
+		pm_runtime_mark_last_busy(kdev);
 
 		return -EAGAIN;
 	}
@@ -2384,9 +2378,9 @@ static int intel_runtime_suspend(struct device *device)
 	return 0;
 }
 
-static int intel_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *kdev)
 {
-	struct pci_dev *pdev = to_pci_dev(device);
+	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret = 0;

+ 115 - 112
drivers/gpu/drm/i915/i915_drv.h

@@ -70,7 +70,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20160822"
+#define DRIVER_DATE		"20160919"
 
 #undef WARN_ON
 /* Many gcc seem not to see through this and fall over :( */
@@ -510,8 +510,12 @@ struct drm_i915_display_funcs {
 					 struct intel_initial_plane_config *);
 	int (*crtc_compute_clock)(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state);
-	void (*crtc_enable)(struct drm_crtc *crtc);
-	void (*crtc_disable)(struct drm_crtc *crtc);
+	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
+			    struct drm_atomic_state *old_state);
+	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
+			     struct drm_atomic_state *old_state);
+	void (*update_crtcs)(struct drm_atomic_state *state,
+			     unsigned int *crtc_vblank_mask);
 	void (*audio_codec_enable)(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   const struct drm_display_mode *adjusted_mode);
@@ -575,8 +579,6 @@ struct intel_uncore_funcs {
 				uint16_t val, bool trace);
 	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
 				uint32_t val, bool trace);
-	void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
-				uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -637,7 +639,7 @@ struct intel_csr {
 	func(is_i915g) sep \
 	func(is_i945gm) sep \
 	func(is_g33) sep \
-	func(need_gfx_hws) sep \
+	func(hws_needs_physical) sep \
 	func(is_g4x) sep \
 	func(is_pineview) sep \
 	func(is_broadwater) sep \
@@ -652,6 +654,19 @@ struct intel_csr {
 	func(is_kabylake) sep \
 	func(is_preliminary) sep \
 	func(has_fbc) sep \
+	func(has_psr) sep \
+	func(has_runtime_pm) sep \
+	func(has_csr) sep \
+	func(has_resource_streamer) sep \
+	func(has_rc6) sep \
+	func(has_rc6p) sep \
+	func(has_dp_mst) sep \
+	func(has_gmbus_irq) sep \
+	func(has_hw_contexts) sep \
+	func(has_logical_ring_contexts) sep \
+	func(has_l3_dpf) sep \
+	func(has_gmch_display) sep \
+	func(has_guc) sep \
 	func(has_pipe_cxsr) sep \
 	func(has_hotplug) sep \
 	func(cursor_needs_physical) sep \
@@ -667,6 +682,24 @@ struct intel_csr {
 #define DEFINE_FLAG(name) u8 name:1
 #define SEP_SEMICOLON ;
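
For readers unfamiliar with the construct: DEV_INFO_FOR_EACH_FLAG together with DEFINE_FLAG and SEP_SEMICOLON is an X-macro, so the single flag list being extended above expands into struct bitfields here and can be reused elsewhere (e.g. to print the flags). A self-contained miniature of the pattern, with hypothetical names:

    #define FEATURE_LIST(func, sep) \
        func(has_foo) sep \
        func(has_bar)

    #define DEFINE_BIT(name) unsigned int name:1
    #define SEP_SEMI ;

    struct features {
        FEATURE_LIST(DEFINE_BIT, SEP_SEMI);
        /* expands to: unsigned int has_foo:1; unsigned int has_bar:1; */
    };
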
 
+struct sseu_dev_info {
+	u8 slice_mask;
+	u8 subslice_mask;
+	u8 eu_total;
+	u8 eu_per_subslice;
+	u8 min_eu_in_pool;
+	/* For each slice, which subslices have 7 EUs (bitfield)? */
+	u8 subslice_7eu[3];
+	u8 has_slice_pg:1;
+	u8 has_subslice_pg:1;
+	u8 has_eu_pg:1;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
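
hweight8() is the kernel's 8-bit population count, so the new helper multiplies the number of enabled slices by the subslices per slice; note it assumes subslice_mask is uniform across slices. For example, slice_mask = 0x3 and subslice_mask = 0x7 give 2 * 3 = 6 subslices in total. A userspace analogue for experimentation (__builtin_popcount standing in for hweight8):

    #include <stdio.h>

    static unsigned int subslice_total(unsigned char slice_mask,
                                       unsigned char subslice_mask)
    {
        return __builtin_popcount(slice_mask) *
               __builtin_popcount(subslice_mask);
    }

    int main(void)
    {
        printf("%u\n", subslice_total(0x3, 0x7));	/* prints 6 */
        return 0;
    }
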
 struct intel_device_info {
 	u32 display_mmio_offset;
 	u16 device_id;
@@ -677,6 +710,7 @@ struct intel_device_info {
 	u8 ring_mask; /* Rings supported by the HW */
 	u8 num_rings;
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+	u16 ddb_size; /* in blocks */
 	/* Register offsets for the various display pipes and transcoders */
 	int pipe_offsets[I915_MAX_TRANSCODERS];
 	int trans_offsets[I915_MAX_TRANSCODERS];
@@ -684,17 +718,7 @@ struct intel_device_info {
 	int cursor_offsets[I915_MAX_PIPES];
 
 	/* Slice/subslice/EU info */
-	u8 slice_total;
-	u8 subslice_total;
-	u8 subslice_per_slice;
-	u8 eu_total;
-	u8 eu_per_subslice;
-	u8 min_eu_in_pool;
-	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
-	u8 subslice_7eu[3];
-	u8 has_slice_pg:1;
-	u8 has_subslice_pg:1;
-	u8 has_eu_pg:1;
+	struct sseu_dev_info sseu;
 
 	struct color_luts {
 		u16 degamma_lut_size;
@@ -1161,6 +1185,7 @@ struct intel_gen6_power_mgmt {
 	bool interrupts_enabled;
 	u32 pm_iir;
 
+	/* PM interrupt bits that should never be masked */
 	u32 pm_intr_keep;
 
 	/* Frequencies are stored in potentially platform dependent multiples.
@@ -1334,7 +1359,7 @@ struct i915_gem_mm {
 	bool interruptible;
 
 	/* the indicator for dispatch video commands on two BSD rings */
-	unsigned int bsd_engine_dispatch_index;
+	atomic_t bsd_engine_dispatch_index;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -1382,9 +1407,10 @@ struct i915_gpu_error {
 	 * State variable controlling the reset flow and count
 	 *
 	 * This is a counter which gets incremented when reset is triggered,
-	 * and again when reset has been handled. So odd values (lowest bit set)
-	 * means that reset is in progress and even values that
-	 * (reset_counter >> 1):th reset was successfully completed.
+	 *
+	 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
+	 * meaning that any waiters holding onto the struct_mutex should
+	 * relinquish the lock immediately in order for the reset to start.
 	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
 	 * set meaning that hardware is terminally sour and there is no
@@ -1399,10 +1425,11 @@ struct i915_gpu_error {
 	 * naturally enforces the correct ordering between the bail-out of the
 	 * waiter and the gpu reset work code.
 	 */
-	atomic_t reset_counter;
+	unsigned long reset_count;
 
-#define I915_RESET_IN_PROGRESS_FLAG	1
-#define I915_WEDGED			(1 << 31)
+	unsigned long flags;
+#define I915_RESET_IN_PROGRESS	0
+#define I915_WEDGED		(BITS_PER_LONG - 1)
 
 	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
@@ -1956,6 +1983,13 @@ struct drm_i915_private {
 	struct i915_suspend_saved_registers regfile;
 	struct vlv_s0ix_state vlv_s0ix_state;
 
+	enum {
+		I915_SKL_SAGV_UNKNOWN = 0,
+		I915_SKL_SAGV_DISABLED,
+		I915_SKL_SAGV_ENABLED,
+		I915_SKL_SAGV_NOT_CONTROLLED
+	} skl_sagv_status;
+
 	struct {
 		/*
 		 * Raw watermark latency values:
@@ -2010,6 +2044,7 @@ struct drm_i915_private {
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
+		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
 		/**
@@ -2057,9 +2092,9 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 	return container_of(dev, struct drm_i915_private, drm);
 }
 
-static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 {
-	return to_i915(dev_get_drvdata(dev));
+	return to_i915(dev_get_drvdata(kdev));
 }
 
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
@@ -2082,13 +2117,16 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 		for_each_if (((id__) = (engine__)->id, \
 			      intel_engine_initialized(engine__)))
 
+#define __mask_next_bit(mask) ({					\
+	int __idx = ffs(mask) - 1;					\
+	mask &= ~BIT(__idx);						\
+	__idx;								\
+})
+
 /* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__) \
-	for ((engine__) = &(dev_priv__)->engine[0]; \
-	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-	     (engine__)++) \
-		for_each_if (((mask__) & intel_engine_flag(engine__)) && \
-			     intel_engine_initialized(engine__))
+#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
+	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
+	     tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
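
The rewritten iterator walks only the set bits: __mask_next_bit() uses ffs() to find the lowest set bit, clears it, and yields its index, so each masked engine is visited exactly once in index order instead of scanning the whole engine array. A standalone sketch of the same idiom (engine numbering here is illustrative):

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    int main(void)
    {
        unsigned int mask = 0x5;	/* bits 0 and 2 set */

        while (mask) {
            int idx = ffs(mask) - 1;	/* index of lowest set bit */
            mask &= ~(1u << idx);		/* pop it */
            printf("visit engine %d\n", idx);
        }
        return 0;
    }
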
 
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
@@ -2549,7 +2587,7 @@ struct drm_i915_cmd_table {
 		BUILD_BUG(); \
 	__p; \
 })
-#define INTEL_INFO(p) 	(&__I915__(p)->info)
+#define INTEL_INFO(p)	(&__I915__(p)->info)
 #define INTEL_GEN(p)	(INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
 
@@ -2716,10 +2754,10 @@ struct drm_i915_cmd_table {
 #define HAS_EDRAM(dev)		(!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
 				 HAS_EDRAM(dev))
-#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+#define HWS_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->hws_needs_physical)
 
-#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
-#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
+#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->has_logical_ring_contexts)
 #define USES_PPGTT(dev)		(i915.enable_ppgtt)
 #define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)
@@ -2743,7 +2781,7 @@ struct drm_i915_cmd_table {
  * interrupt source and so prevents the other device from working properly.
  */
 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -2759,38 +2797,27 @@ struct drm_i915_cmd_table {
 
 #define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
-#define HAS_DP_MST(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-				 INTEL_INFO(dev)->gen >= 9)
+#define HAS_DP_MST(dev)	(INTEL_INFO(dev)->has_dp_mst)
 
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
-				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
-				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
-				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
-				 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
-				 IS_KABYLAKE(dev) || IS_BROXTON(dev))
-#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev)		(IS_GEN6(dev) || IS_IVYBRIDGE(dev))
-
-#define HAS_CSR(dev)	(IS_GEN9(dev))
+#define HAS_PSR(dev)		(INTEL_INFO(dev)->has_psr)
+#define HAS_RUNTIME_PM(dev)	(INTEL_INFO(dev)->has_runtime_pm)
+#define HAS_RC6(dev)		(INTEL_INFO(dev)->has_rc6)
+#define HAS_RC6p(dev)		(INTEL_INFO(dev)->has_rc6p)
+
+#define HAS_CSR(dev)	(INTEL_INFO(dev)->has_csr)
 
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
  * properties, so we have separate macros to test them.
  */
-#define HAS_GUC(dev)		(IS_GEN9(dev))
+#define HAS_GUC(dev)		(INTEL_INFO(dev)->has_guc)
 #define HAS_GUC_UCODE(dev)	(HAS_GUC(dev))
 #define HAS_GUC_SCHED(dev)	(HAS_GUC(dev))
 
-#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
-				    INTEL_INFO(dev)->gen >= 8)
-
-#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
-				 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
-				 !IS_BROXTON(dev))
+#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
 
 #define HAS_POOLED_EU(dev)	(INTEL_INFO(dev)->has_pooled_eu)
 
@@ -2818,11 +2845,10 @@ struct drm_i915_cmd_table {
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
-			       IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
@@ -2843,7 +2869,7 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-			       	int enable_ppgtt);
+				int enable_ppgtt);
 
 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
 
@@ -2861,7 +2887,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 #endif
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset(struct drm_i915_private *dev_priv);
+extern void i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3197,8 +3223,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req,
 			     unsigned int flags);
@@ -3207,6 +3231,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
+int i915_gem_mmap_gtt_version(void);
 
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
@@ -3219,54 +3244,35 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 
-static inline u32 i915_reset_counter(struct i915_gpu_error *error)
-{
-	return atomic_read(&error->reset_counter);
-}
-
-static inline bool __i915_reset_in_progress(u32 reset)
-{
-	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
-}
-
-static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
-{
-	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
-}
-
-static inline bool __i915_terminally_wedged(u32 reset)
-{
-	return unlikely(reset & I915_WEDGED);
-}
-
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
-	return __i915_reset_in_progress(i915_reset_counter(error));
+	return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
 }
 
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
+	return unlikely(test_bit(I915_WEDGED, &error->flags));
 }
 
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
 {
-	return __i915_terminally_wedged(i915_reset_counter(error));
+	return i915_reset_in_progress(error) | i915_terminally_wedged(error);
 }
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
-	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
+	return READ_ONCE(error->reset_count);
 }
 
-void i915_gem_reset(struct drm_device *dev);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-					bool interruptible);
+					unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -3388,7 +3394,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
-void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
@@ -3507,13 +3512,13 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
 void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_device *dev);
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
 #else
 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
-static inline void intel_display_crc_init(struct drm_device *dev) {}
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
 /* i915_gpu_error.c */
@@ -3557,8 +3562,8 @@ extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
 /* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_device *dev_priv);
-void i915_teardown_sysfs(struct drm_device *dev_priv);
+void i915_setup_sysfs(struct drm_i915_private *dev_priv);
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
 
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
@@ -3735,9 +3740,16 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
  * will be implemented using 2 32-bit writes in an arbitrary order with
  * an arbitrary delay between them. This can cause the hardware to
  * act upon the intermediate value, possibly leading to corruption and
- * machine death. You have been warned.
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * dev_priv->uncore.funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
  */
-#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
 #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
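
The danger described in the comment above is tearing: if the low 32 bits wrap between the two halves of the read, the combined value can be wildly wrong. The standard defence, and the reason I915_READ64_2x32 takes both register halves, is to re-read the high word and retry on a mismatch. A generic sketch of that pattern, with hypothetical read_hi()/read_lo() accessors (not the macro's verbatim body):

    #include <stdint.h>

    extern uint32_t read_hi(void);	/* hypothetical 32-bit MMIO reads */
    extern uint32_t read_lo(void);

    static uint64_t read64_2x32(void)
    {
        uint32_t hi, lo;

        do {
            hi = read_hi();
            lo = read_lo();
        } while (hi != read_hi());	/* low word wrapped; retry */

        return ((uint64_t)hi << 32) | lo;
    }
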
@@ -3780,7 +3792,7 @@ __raw_write(64, q)
 #undef __raw_write
 
 /* These are untraced mmio-accessors that are only valid to be used inside
- * criticial sections inside IRQ handlers where forcewake is explicitly
+ * critical sections inside IRQ handlers where forcewake is explicitly
  * controlled.
  * Think twice, and think again, before using these.
  * Note: Should only be used between intel_uncore_forcewake_irqlock() and
@@ -3852,7 +3864,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 			    schedule_timeout_uninterruptible(remaining_jiffies);
 	}
 }
-static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
+
+static inline bool
+__i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 
@@ -3914,17 +3928,6 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
 			return true;
 	}
 
-	/* We need to check whether any gpu reset happened in between
-	 * the request being submitted and now. If a reset has occurred,
-	 * the seqno will have been advance past ours and our request
-	 * is complete. If we are in the process of handling a reset,
-	 * the request is effectively complete as the rendering will
-	 * be discarded, but we need to return in order to drop the
-	 * struct_mutex.
-	 */
-	if (i915_reset_in_progress(&req->i915->gpu_error))
-		return true;
-
 	return false;
 }
 

+ 165 - 156
drivers/gpu/drm/i915/i915_gem.c

@@ -386,7 +386,8 @@ __unsafe_wait_rendering(struct drm_i915_gem_object *obj,
 		int ret;
 
 		ret = i915_gem_active_wait_unlocked(&active[idx],
-						    true, NULL, rps);
+						    I915_WAIT_INTERRUPTIBLE,
+						    NULL, rps);
 		if (ret)
 			return ret;
 	}
@@ -1679,6 +1680,56 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
 	return size >> PAGE_SHIFT;
 }
 
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
+ * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
+ *     be aligned and suitable for fencing, and still fit into the available
+ *     mappable space left by the pinned display objects. A classic problem
+ *     we called the page-fault-of-doom where we would ping-pong between
+ *     two objects that could not fit inside the GTT and so the memcpy
+ *     would page one object in at the expense of the other between every
+ *     single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
+ *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ *     object is too large for the available space (or simply too large
+ *     for the mappable aperture!), a view is created instead and faulted
+ *     into userspace. (This view is aligned and sized appropriately for
+ *     fenced access.)
+ *
+ * Restrictions:
+ *
+ *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
+ *    machine hangs on some architectures, corruption on others. An attempt
+ *    to service a GTT page fault from a snoopable object will generate a
+ *    SIGBUS.
+ *
+ *  * the object must be able to fit into RAM (physical memory, though not
+ *    limited to the mappable aperture).
+ *
+ * Caveats:
+ *
+ *  * a new GTT page fault will synchronize rendering from the GPU and flush
+ *    all data to system memory. Subsequent access will not be synchronized.
+ *
+ *  * all mappings are revoked on runtime device suspend.
+ *
+ *  * there are only 8, 16 or 32 fence registers to share between all users
+ *    (older machines require a fence register for display and blitter access
+ *    as well). Contention of the fence registers will cause the previous users
+ *    to be unmapped and any new access will generate new page faults.
+ *
+ *  * running out of memory while servicing a fault may generate a SIGBUS,
+ *    rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+	return 1;
+}
+
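
Userspace discovers this version number through the i915 GETPARAM ioctl (the series exposes it as I915_PARAM_MMAP_GTT_VERSION). A sketch of the probe, assuming libdrm; treating an ioctl failure as version 0 covers kernels that predate the parameter:

    #include <xf86drm.h>
    #include <i915_drm.h>

    static int gtt_mmap_version(int fd)
    {
        struct drm_i915_getparam gp = { .param = I915_PARAM_MMAP_GTT_VERSION };
        int value = 0;

        gp.value = &value;
        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
            return 0;	/* parameter unknown: treat as version 0 */
        return value;
    }
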
 /**
  * i915_gem_fault - fault a page into the GTT
  * @area: CPU VMA in question
@@ -1694,6 +1745,9 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
  * from the GTT and/or fence registers to make room.  So performance may
  * suffer if the GTT working set is large or there are few fence registers
  * left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
@@ -1973,7 +2027,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	 * to claim that space for ourselves, we need to take the big
 	 * struct_mutex to free the requests+objects and allocate our slot.
 	 */
-	err = i915_gem_wait_for_idle(dev_priv, true);
+	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
 	if (err)
 		return err;
 
@@ -2495,32 +2549,94 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 		if (i915_gem_request_completed(request))
 			continue;
 
+		if (!i915_sw_fence_done(&request->submit))
+			break;
+
 		return request;
 	}
 
 	return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void reset_request(struct drm_i915_gem_request *request)
+{
+	void *vaddr = request->ring->vaddr;
+	u32 head;
+
+	/* As this request likely depends on state from the lost
+	 * context, clear out all the user operations leaving the
+	 * breadcrumb at the end (so we get the fence notifications).
+	 */
+	head = request->head;
+	if (request->postfix < head) {
+		memset(vaddr + head, 0, request->ring->size - head);
+		head = 0;
+	}
+	memset(vaddr + head, 0, request->postfix - head);
+}
+
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
+	struct i915_gem_context *incomplete_ctx;
 	bool ring_hung;
 
+	/* Ensure irq handler finishes, and not run again. */
+	tasklet_kill(&engine->irq_tasklet);
+	if (engine->irq_seqno_barrier)
+		engine->irq_seqno_barrier(engine);
+
 	request = i915_gem_find_active_request(engine);
-	if (request == NULL)
+	if (!request)
 		return;
 
 	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
-
 	i915_set_reset_status(request->ctx, ring_hung);
+	if (!ring_hung)
+		return;
+
+	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+			 engine->name, request->fence.seqno);
+
+	/* Setup the CS to resume from the breadcrumb of the hung request */
+	engine->reset_hw(engine, request);
+
+	/* Users of the default context do not rely on logical state
+	 * preserved between batches. They have to emit full state on
+	 * every batch and so it is safe to execute queued requests following
+	 * the hang.
+	 *
+	 * Other contexts preserve state, now corrupt. We want to skip all
+	 * queued requests that reference the corrupt context.
+	 */
+	incomplete_ctx = request->ctx;
+	if (i915_gem_context_is_default(incomplete_ctx))
+		return;
+
 	list_for_each_entry_continue(request, &engine->request_list, link)
-		i915_set_reset_status(request->ctx, false);
+		if (request->ctx == incomplete_ctx)
+			reset_request(request);
 }
 
-static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_gem_request *request;
-	struct intel_ring *ring;
+	struct intel_engine_cs *engine;
+
+	i915_gem_retire_requests(dev_priv);
+
+	for_each_engine(engine, dev_priv)
+		i915_gem_reset_engine(engine);
+
+	i915_gem_restore_fences(&dev_priv->drm);
+}
+
+static void nop_submit_request(struct drm_i915_gem_request *request)
+{
+}
+
+static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
+{
+	engine->submit_request = nop_submit_request;
 
 	/* Mark all pending requests as complete so that any concurrent
 	 * (lockless) lookup doesn't try and wait upon the request as we
@@ -2535,60 +2651,30 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 */
 
 	if (i915.enable_execlists) {
-		/* Ensure irq handler finishes or is cancelled. */
-		tasklet_kill(&engine->irq_tasklet);
-
-		intel_execlists_cancel_requests(engine);
-	}
-
-	/*
-	 * We must free the requests after all the corresponding objects have
-	 * been moved off active lists. Which is the same order as the normal
-	 * retire_requests function does. This is important if object hold
-	 * implicit references on things like e.g. ppgtt address spaces through
-	 * the request.
-	 */
-	request = i915_gem_active_raw(&engine->last_request,
-				      &engine->i915->drm.struct_mutex);
-	if (request)
-		i915_gem_request_retire_upto(request);
-	GEM_BUG_ON(intel_engine_is_active(engine));
-
-	/* Having flushed all requests from all queues, we know that all
-	 * ringbuffers must now be empty. However, since we do not reclaim
-	 * all space when retiring the request (to prevent HEADs colliding
-	 * with rapid ringbuffer wraparound) the amount of available space
-	 * upon reset is less than when we start. Do one more pass over
-	 * all the ringbuffers to reset last_retired_head.
-	 */
-	list_for_each_entry(ring, &engine->buffers, link) {
-		ring->last_retired_head = ring->tail;
-		intel_ring_update_space(ring);
+		spin_lock(&engine->execlist_lock);
+		INIT_LIST_HEAD(&engine->execlist_queue);
+		i915_gem_request_put(engine->execlist_port[0].request);
+		i915_gem_request_put(engine->execlist_port[1].request);
+		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+		spin_unlock(&engine->execlist_lock);
 	}
 
 	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
-void i915_gem_reset(struct drm_device *dev)
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 
-	/*
-	 * Before we free the objects from the requests, we need to inspect
-	 * them for finding the guilty party. As the requests only borrow
-	 * their reference to the objects, the inspection must be done first.
-	 */
-	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_status(engine);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
 
+	i915_gem_context_lost(dev_priv);
 	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_cleanup(engine);
+		i915_gem_cleanup_engine(engine);
 	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
-	i915_gem_context_reset(dev);
-
-	i915_gem_restore_fences(dev);
+	i915_gem_retire_requests(dev_priv);
 }
 
 static void
@@ -2721,7 +2807,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	active = __I915_BO_ACTIVE(obj);
 	for_each_active(active, idx) {
 		s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
-		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
+		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+						    I915_WAIT_INTERRUPTIBLE,
 						    timeout, rps);
 		if (ret)
 			break;
@@ -2731,96 +2818,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	return ret;
 }
 
-static int
-__i915_gem_object_sync(struct drm_i915_gem_request *to,
-		       struct drm_i915_gem_request *from)
-{
-	int ret;
-
-	if (to->engine == from->engine)
-		return 0;
-
-	if (!i915.semaphores) {
-		ret = i915_wait_request(from,
-					from->i915->mm.interruptible,
-					NULL,
-					NO_WAITBOOST);
-		if (ret)
-			return ret;
-	} else {
-		int idx = intel_engine_sync_index(from->engine, to->engine);
-		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
-			return 0;
-
-		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from);
-		if (ret)
-			return ret;
-
-		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
-	}
-
-	return 0;
-}
-
-/**
- * i915_gem_object_sync - sync an object to a ring.
- *
- * @obj: object which may be in use on another ring.
- * @to: request we are wishing to use
- *
- * This code is meant to abstract object synchronization with the GPU.
- * Conceptually we serialise writes between engines inside the GPU.
- * We only allow one engine to write into a buffer at any time, but
- * multiple readers. To ensure each has a coherent view of memory, we must:
- *
- * - If there is an outstanding write request to the object, the new
- *   request must wait for it to complete (either CPU or in hw, requests
- *   on the same ring will be naturally ordered).
- *
- * - If we are a write request (pending_write_domain is set), the new
- *   request must wait for outstanding read requests to complete.
- *
- * Returns 0 if successful, else propagates up the lower layer error.
- */
-int
-i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_request *to)
-{
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int idx;
-
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	active_mask = i915_gem_object_get_active(obj);
-	if (!active_mask)
-		return 0;
-
-	if (obj->base.pending_write_domain) {
-		active = obj->last_read;
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
-
-	for_each_active(active_mask, idx) {
-		struct drm_i915_gem_request *request;
-		int ret;
-
-		request = i915_gem_active_peek(&active[idx],
-					       &obj->base.dev->struct_mutex);
-		if (!request)
-			continue;
-
-		ret = __i915_gem_object_sync(to, request);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
@@ -2924,7 +2921,7 @@ destroy:
 }
 
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   bool interruptible)
+			   unsigned int flags)
 {
 	struct intel_engine_cs *engine;
 	int ret;
@@ -2933,7 +2930,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 		if (engine->last_context == NULL)
 			continue;
 
-		ret = intel_engine_idle(engine, interruptible);
+		ret = intel_engine_idle(engine, flags);
 		if (ret)
 			return ret;
 	}
@@ -3688,7 +3685,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = i915_wait_request(target, true, NULL, NULL);
+	ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
 	i915_gem_request_put(target);
 
 	return ret;
@@ -4244,7 +4241,9 @@ int i915_gem_suspend(struct drm_device *dev)
 	if (ret)
 		goto err;
 
-	ret = i915_gem_wait_for_idle(dev_priv, true);
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		goto err;
 
@@ -4280,8 +4279,7 @@ void i915_gem_resume(struct drm_device *dev)
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	if (i915.enable_execlists)
-		intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
+	dev_priv->gt.resume(dev_priv);
 
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -4433,8 +4431,10 @@ int i915_gem_init(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	if (!i915.enable_execlists) {
+		dev_priv->gt.resume = intel_legacy_submission_resume;
 		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 	} else {
+		dev_priv->gt.resume = intel_lr_context_resume;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
 	}
 
@@ -4467,7 +4467,7 @@ int i915_gem_init(struct drm_device *dev)
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		i915_gem_set_wedged(dev_priv);
 		ret = 0;
 	}
 
@@ -4569,6 +4569,8 @@ i915_gem_load_init(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
+	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+
 	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
@@ -4587,6 +4589,11 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj;
+	struct list_head *phases[] = {
+		&dev_priv->mm.unbound_list,
+		&dev_priv->mm.bound_list,
+		NULL
+	}, **p;
 
 	/* Called just before we write the hibernation image.
 	 *
@@ -4597,16 +4604,18 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 	 *
 	 * To make sure the hibernation image contains the latest state,
 	 * we update that state just before writing out the image.
+	 *
+	 * To try and reduce the hibernation image, we manually shrink
+	 * the objects as well.
 	 */
 
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
+	i915_gem_shrink_all(dev_priv);
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	for (p = phases; *p; p++) {
+		list_for_each_entry(obj, *p, global_list) {
+			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+		}
 	}
 
 	return 0;
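
The NULL-terminated phases[] array is what lets one loop body replace the two copy-pasted list walks deleted above. The same sentinel idiom in miniature (names and the 0-terminated item arrays are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        int unbound[] = { 1, 2, 0 }, bound[] = { 3, 0 };
        int *phases[] = { unbound, bound, NULL }, **p;

        for (p = phases; *p; p++)		/* NULL ends the walk */
            for (int *i = *p; *i; i++)	/* 0 ends each phase */
                printf("%d\n", *i);
        return 0;
    }
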

+ 0 - 16
drivers/gpu/drm/i915/i915_gem_context.c

@@ -420,22 +420,6 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 	}
 }
 
-void i915_gem_context_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	lockdep_assert_held(&dev->struct_mutex);
-
-	if (i915.enable_execlists) {
-		struct i915_gem_context *ctx;
-
-		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev_priv, ctx);
-	}
-
-	i915_gem_context_lost(dev_priv);
-}
-
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);

+ 6 - 2
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -170,7 +170,9 @@ search_again:
 	if (ret)
 		return ret;
 
-	ret = i915_gem_wait_for_idle(dev_priv, true);
+	ret = i915_gem_wait_for_idle(dev_priv,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
 	if (ret)
 		return ret;
 
@@ -275,7 +277,9 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 				return ret;
 		}
 
-		ret = i915_gem_wait_for_idle(dev_priv, true);
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
 		if (ret)
 			return ret;
 

+ 17 - 12
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1131,13 +1131,25 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
+		struct reservation_object *resv;
 
 		if (obj->flags & other_rings) {
-			ret = i915_gem_object_sync(obj, req);
+			ret = i915_gem_request_await_object
+				(req, obj, obj->base.pending_write_domain);
 			if (ret)
 				return ret;
 		}
 
+		resv = i915_gem_object_get_dmabuf_resv(obj);
+		if (resv) {
+			ret = i915_sw_fence_await_reservation
+				(&req->submit, resv, &i915_fence_ops,
+				 obj->base.pending_write_domain, 10*HZ,
+				 GFP_KERNEL | __GFP_NOWARN);
+			if (ret < 0)
+				return ret;
+		}
+
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj, false);
 	}
@@ -1253,12 +1265,9 @@ static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
-	struct i915_gem_context *ctx = NULL;
+	struct i915_gem_context *ctx;
 	struct i915_ctx_hang_stats *hs;
 
-	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
-		return ERR_PTR(-EINVAL);
-
 	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
 	if (IS_ERR(ctx))
 		return ctx;
@@ -1538,13 +1547,9 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Check whether the file_priv has already selected one ring. */
-	if ((int)file_priv->bsd_engine < 0) {
-		/* If not, use the ping-pong mechanism to select one. */
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
-		dev_priv->mm.bsd_engine_dispatch_index ^= 1;
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-	}
+	if ((int)file_priv->bsd_engine < 0)
+		file_priv->bsd_engine = atomic_fetch_xor(1,
+			 &dev_priv->mm.bsd_engine_dispatch_index);
 
 	return file_priv->bsd_engine;
 }
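
atomic_fetch_xor() flips the bit and returns the old value in one atomic step, which is why the struct_mutex lock/unlock pair could be dropped: two racing callers still get 0 and 1 rather than the same engine. A C11 analogue of the ping-pong (the kernel's atomic_fetch_xor takes its operands in the opposite order):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint next_bsd;	/* starts at 0, as in the driver */

    static unsigned int pick_bsd_engine(void)
    {
        return atomic_fetch_xor(&next_bsd, 1);	/* 0, 1, 0, 1, ... */
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            printf("%u\n", pick_bsd_engine());
        return 0;
    }
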

+ 70 - 71
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -32,6 +32,8 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+
 /**
  * DOC: Global GTT views
  *
@@ -122,8 +124,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 	has_full_48bit_ppgtt =
		IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-	if (intel_vgpu_active(dev_priv))
-		has_full_ppgtt = false; /* emulation is too hard */
+	if (intel_vgpu_active(dev_priv)) {
+		/* emulation is too hard */
+		has_full_ppgtt = false;
+		has_full_48bit_ppgtt = false;
+	}
 
 	if (!has_aliasing_ppgtt)
 		return 0;
@@ -158,7 +163,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 		return 0;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
 		return has_full_48bit_ppgtt ? 3 : 2;
 	else
 		return has_aliasing_ppgtt ? 1 : 0;
@@ -326,16 +331,16 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static int __setup_page_dma(struct drm_device *dev,
 			    struct i915_page_dma *p, gfp_t flags)
 {
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &dev->pdev->dev;
 
 	p->page = alloc_page(flags);
 	if (!p->page)
 		return -ENOMEM;
 
-	p->daddr = dma_map_page(device,
+	p->daddr = dma_map_page(kdev,
 				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 
-	if (dma_mapping_error(device, p->daddr)) {
+	if (dma_mapping_error(kdev, p->daddr)) {
 		__free_page(p->page);
 		return -EINVAL;
 	}
@@ -345,15 +350,17 @@ static int __setup_page_dma(struct drm_device *dev,
 
 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-	return __setup_page_dma(dev, p, GFP_KERNEL);
+	return __setup_page_dma(dev, p, I915_GFP_DMA);
 }
 
 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
+	struct pci_dev *pdev = dev->pdev;
+
 	if (WARN_ON(!p->page))
 		return;
 
-	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 	__free_page(p->page);
 	memset(p, 0, sizeof(*p));
 }
@@ -407,33 +414,18 @@ static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
 	fill_page_dma(dev, p, v);
 }
 
-static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+static int
+setup_scratch_page(struct drm_device *dev,
+		   struct i915_page_dma *scratch,
+		   gfp_t gfp)
 {
-	struct i915_page_scratch *sp;
-	int ret;
-
-	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
-	if (sp == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
-	if (ret) {
-		kfree(sp);
-		return ERR_PTR(ret);
-	}
-
-	set_pages_uc(px_page(sp), 1);
-
-	return sp;
+	return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
 }
 
-static void free_scratch_page(struct drm_device *dev,
-			      struct i915_page_scratch *sp)
+static void cleanup_scratch_page(struct drm_device *dev,
+				 struct i915_page_dma *scratch)
 {
-	set_pages_wb(px_page(sp), 1);
-
-	cleanup_px(dev, sp);
-	kfree(sp);
+	cleanup_page_dma(dev, scratch);
 }
 
 static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -479,7 +471,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
 {
 	gen8_pte_t scratch_pte;
 
-	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 				      I915_CACHE_LLC, true);
 
 	fill_px(vm->dev, pt, scratch_pte);
@@ -490,9 +482,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
 {
 	gen6_pte_t scratch_pte;
 
-	WARN_ON(px_dma(vm->scratch_page) == 0);
+	WARN_ON(vm->scratch_page.daddr == 0);
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, true, 0);
 
 	fill32_px(vm->dev, pt, scratch_pte);
@@ -776,7 +768,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				   bool use_scratch)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 						 I915_CACHE_LLC, use_scratch);
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -882,9 +874,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	struct drm_device *dev = vm->dev;
 	int ret;
 
-	vm->scratch_page = alloc_scratch_page(dev);
-	if (IS_ERR(vm->scratch_page))
-		return PTR_ERR(vm->scratch_page);
+	ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+	if (ret)
+		return ret;
 
 	vm->scratch_pt = alloc_pt(dev);
 	if (IS_ERR(vm->scratch_pt)) {
@@ -918,7 +910,7 @@ free_pd:
 free_pt:
 	free_pt(dev, vm->scratch_pt);
 free_scratch_page:
-	free_scratch_page(dev, vm->scratch_page);
+	cleanup_scratch_page(dev, &vm->scratch_page);
 
 	return ret;
 }
@@ -962,7 +954,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 		free_pdp(dev, vm->scratch_pdp);
 	free_pd(dev, vm->scratch_pd);
 	free_pt(dev, vm->scratch_pt);
-	free_scratch_page(dev, vm->scratch_page);
+	cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
@@ -1459,7 +1451,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	struct i915_address_space *vm = &ppgtt->base;
 	uint64_t start = ppgtt->base.start;
 	uint64_t length = ppgtt->base.total;
-	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 						 I915_CACHE_LLC, true);
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -1576,7 +1568,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	uint32_t  pte, pde;
 	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, true, 0);
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
@@ -1801,7 +1793,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned first_pte = first_entry % GEN6_PTES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
@@ -1947,14 +1939,15 @@ unwind_out:
 static int gen6_init_scratch(struct i915_address_space *vm)
 {
 	struct drm_device *dev = vm->dev;
+	int ret;
 
-	vm->scratch_page = alloc_scratch_page(dev);
-	if (IS_ERR(vm->scratch_page))
-		return PTR_ERR(vm->scratch_page);
+	ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+	if (ret)
+		return ret;
 
 	vm->scratch_pt = alloc_pt(dev);
 	if (IS_ERR(vm->scratch_pt)) {
-		free_scratch_page(dev, vm->scratch_page);
+		cleanup_scratch_page(dev, &vm->scratch_page);
 		return PTR_ERR(vm->scratch_pt);
 	}
 
@@ -1968,7 +1961,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
 	struct drm_device *dev = vm->dev;
 
 	free_pt(dev, vm->scratch_pt);
-	free_scratch_page(dev, vm->scratch_page);
+	cleanup_scratch_page(dev, &vm->scratch_page);
 }
 
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -2311,12 +2304,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 {
-#ifdef writeq
 	writeq(pte, addr);
-#else
-	iowrite32((u32)pte, addr);
-	iowrite32(pte >> 32, addr + 4);
-#endif
 }
 
 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
@@ -2509,7 +2497,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
@@ -2541,7 +2529,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
 				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
@@ -2685,19 +2673,19 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct device *kdev = &dev_priv->drm.pdev->dev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, false)) {
+		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
 		}
 	}
 
-	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+	dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
 		     PCI_DMA_BIDIRECTIONAL);
 }
 
@@ -2894,8 +2882,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
 	struct pci_dev *pdev = ggtt->base.dev->pdev;
-	struct i915_page_scratch *scratch_page;
 	phys_addr_t phys_addr;
+	int ret;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
 	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
@@ -2916,16 +2904,16 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 		return -ENOMEM;
 	}
 
-	scratch_page = alloc_scratch_page(ggtt->base.dev);
-	if (IS_ERR(scratch_page)) {
+	ret = setup_scratch_page(ggtt->base.dev,
+				 &ggtt->base.scratch_page,
+				 GFP_DMA32);
+	if (ret) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
 		iounmap(ggtt->gsm);
-		return PTR_ERR(scratch_page);
+		return ret;
 	}
 
-	ggtt->base.scratch_page = scratch_page;
-
 	return 0;
 }
 
@@ -3007,7 +2995,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 
 	iounmap(ggtt->gsm);
-	free_scratch_page(vm->dev, vm->scratch_page);
+	cleanup_scratch_page(vm->dev, &vm->scratch_page);
 }
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3244,8 +3232,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
+	struct drm_i915_gem_object *obj, *on;
 
 	i915_check_and_clear_faults(dev_priv);
 
@@ -3253,20 +3240,32 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
 			       true);
 
-	/* Cache flush objects bound into GGTT and rebind them. */
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+
+	/* clflush objects bound into the GGTT and rebind them. */
+	list_for_each_entry_safe(obj, on,
+				 &dev_priv->mm.bound_list, global_list) {
+		bool ggtt_bound = false;
+		struct i915_vma *vma;
+
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (vma->vm != &ggtt->base)
 				continue;
 
+			if (!i915_vma_unbind(vma))
+				continue;
+
 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
 					      PIN_UPDATE));
+			ggtt_bound = true;
 		}
 
-		if (obj->pin_display)
+		if (ggtt_bound)
 			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
 	}
 
+	ggtt->base.closed = false;
+
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
 			chv_setup_private_ppat(dev_priv);
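
The vm->closed flag (declared in i915_gem_gtt.h just below) does the heavy lifting in the rebind loop above: while it is set, unbinding a VMA skips rewriting PTEs that the preceding full-range clear has already scrubbed. A hedged outline of the bracketing pattern:

	ggtt->base.closed = true;   /* unbinds become bookkeeping only, no PTE writes */
	/* ... i915_vma_unbind() + i915_vma_bind() every GGTT VMA ... */
	ggtt->base.closed = false;  /* normal PTE maintenance resumes */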

+ 1 - 5
drivers/gpu/drm/i915/i915_gem_gtt.h

@@ -312,10 +312,6 @@ struct i915_page_dma {
 #define px_page(px) (px_base(px)->page)
 #define px_dma(px) (px_base(px)->daddr)
 
-struct i915_page_scratch {
-	struct i915_page_dma base;
-};
-
 struct i915_page_table {
 	struct i915_page_dma base;
 
@@ -361,7 +357,7 @@ struct i915_address_space {
 
 	bool closed;
 
-	struct i915_page_scratch *scratch_page;
+	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
 	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

+ 195 - 34
drivers/gpu/drm/i915/i915_gem_request.c

@@ -233,16 +233,18 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 	} while (tmp != req);
 }
 
-static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
+static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 {
-	if (__i915_terminally_wedged(reset_counter))
+	struct i915_gpu_error *error = &dev_priv->gpu_error;
+
+	if (i915_terminally_wedged(error))
 		return -EIO;
 
-	if (__i915_reset_in_progress(reset_counter)) {
+	if (i915_reset_in_progress(error)) {
 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 		 * -EIO unconditionally for these.
 		 */
-		if (!interruptible)
+		if (!dev_priv->mm.interruptible)
 			return -EIO;
 
 		return -EAGAIN;
@@ -258,7 +260,9 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 	/* Carefully retire all requests without writing to the rings */
 	for_each_engine(engine, dev_priv) {
-		ret = intel_engine_idle(engine, true);
+		ret = intel_engine_idle(engine,
+					I915_WAIT_INTERRUPTIBLE |
+					I915_WAIT_LOCKED);
 		if (ret)
 			return ret;
 	}
@@ -314,6 +318,26 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 	return 0;
 }
 
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct drm_i915_gem_request *request =
+		container_of(fence, typeof(*request), submit);
+
+	/* Will be called from irq-context when using foreign DMA fences */
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		request->engine->submit_request(request);
+		break;
+
+	case FENCE_FREE:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 /**
  * i915_gem_request_alloc - allocate a request structure
  *
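
For orientation, submit_notify() above is the completion callback of the new per-request submit fence; every call that drives it appears in this patch (i915_sw_fence_init() at allocation, i915_sw_fence_await_sw_fence() and i915_sw_fence_await_dma_fence() to add dependencies, i915_sw_fence_commit() to arm it). A minimal sketch of the lifecycle, error handling elided:

	i915_sw_fence_init(&req->submit, submit_notify);	/* at request alloc */
	i915_sw_fence_await_sw_fence(&req->submit,		/* queue behind the */
				     &prev->submit, &req->submitq); /* previous request */
	i915_sw_fence_commit(&req->submit);	/* once all waits signal,
						 * submit_notify() runs and calls
						 * engine->submit_request() */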
@@ -331,7 +355,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       struct i915_gem_context *ctx)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	u32 seqno;
 	int ret;
@@ -340,7 +363,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
 	 * and restart.
 	 */
-	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+	ret = i915_gem_check_wedge(dev_priv);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -393,6 +416,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		   engine->fence_context,
 		   seqno);
 
+	i915_sw_fence_init(&req->submit, submit_notify);
+
 	INIT_LIST_HEAD(&req->active_list);
 	req->i915 = dev_priv;
 	req->engine = engine;
@@ -402,7 +427,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->previous_context = NULL;
 	req->file_priv = NULL;
 	req->batch = NULL;
-	req->elsp_submitted = 0;
 
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
@@ -436,6 +460,94 @@ err:
 	return ERR_PTR(ret);
 }
 
+static int
+i915_gem_request_await_request(struct drm_i915_gem_request *to,
+			       struct drm_i915_gem_request *from)
+{
+	int idx, ret;
+
+	GEM_BUG_ON(to == from);
+
+	if (to->engine == from->engine)
+		return 0;
+
+	idx = intel_engine_sync_index(from->engine, to->engine);
+	if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+		return 0;
+
+	trace_i915_gem_ring_sync_to(to, from);
+	if (!i915.semaphores) {
+		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+			ret = i915_sw_fence_await_dma_fence(&to->submit,
+							    &from->fence, 0,
+							    GFP_KERNEL);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		ret = to->engine->semaphore.sync_to(to, from);
+		if (ret)
+			return ret;
+	}
+
+	from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+	return 0;
+}
+
+/**
+ * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ *
+ * @to: request we are wishing to use
+ * @obj: object which may be in use on another ring.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+			      struct drm_i915_gem_object *obj,
+			      bool write)
+{
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
+
+	if (write) {
+		active_mask = i915_gem_object_get_active(obj);
+		active = obj->last_read;
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, idx) {
+		struct drm_i915_gem_request *request;
+		int ret;
+
+		request = i915_gem_active_peek(&active[idx],
+					       &obj->base.dev->struct_mutex);
+		if (!request)
+			continue;
+
+		ret = i915_gem_request_await_request(to, request);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
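
A hedged usage sketch for the new await helper, roughly what an execbuf-style caller would do per object; the eb_vmas list and the pending_write_domain test are illustrative assumptions, not part of this patch:

	/* Make 'req' wait (asynchronously) on every object it will touch. */
	list_for_each_entry(vma, &eb_vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		int ret;

		ret = i915_gem_request_await_object(req, obj,
						    obj->base.pending_write_domain);
		if (ret)
			return ret;	/* propagate the lower-layer error */
	}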
@@ -466,10 +578,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_ring *ring = request->ring;
+	struct drm_i915_gem_request *prev;
 	u32 request_start;
 	u32 reserved_tail;
 	int ret;
 
+	trace_i915_gem_request_add(request);
+
 	/*
 	 * To ensure that this call will not fail, space for its emissions
 	 * should already have been reserved in the ring buffer. Let the ring
@@ -493,20 +608,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
 	}
 
-	trace_i915_gem_request_add(request);
-
-	/* Seal the request and mark it as pending execution. Note that
-	 * we may inspect this state, without holding any locks, during
-	 * hangcheck. Hence we apply the barrier to ensure that we do not
-	 * see a more recent value in the hws than we are tracking.
-	 */
-	request->emitted_jiffies = jiffies;
-	request->previous_seqno = engine->last_submitted_seqno;
-	engine->last_submitted_seqno = request->fence.seqno;
-	i915_gem_active_set(&engine->last_request, request);
-	list_add_tail(&request->link, &engine->request_list);
-	list_add_tail(&request->ring_link, &ring->request_list);
-
 	/* Record the position of the start of the breadcrumb so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
@@ -527,8 +628,40 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 		  "for adding the request (%d bytes)\n",
 		  reserved_tail, ret);
 
+	/* Seal the request and mark it as pending execution. Note that
+	 * we may inspect this state, without holding any locks, during
+	 * hangcheck. Hence we apply the barrier to ensure that we do not
+	 * see a more recent value in the hws than we are tracking.
+	 */
+
+	prev = i915_gem_active_raw(&engine->last_request,
+				   &request->i915->drm.struct_mutex);
+	if (prev)
+		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+					     &request->submitq);
+
+	request->emitted_jiffies = jiffies;
+	request->previous_seqno = engine->last_submitted_seqno;
+	engine->last_submitted_seqno = request->fence.seqno;
+	i915_gem_active_set(&engine->last_request, request);
+	list_add_tail(&request->link, &engine->request_list);
+	list_add_tail(&request->ring_link, &ring->request_list);
+
 	i915_gem_mark_busy(engine);
-	engine->submit_request(request);
+
+	local_bh_disable();
+	i915_sw_fence_commit(&request->submit);
+	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+}
+
+static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (list_empty(&wait->task_list))
+		__add_wait_queue(q, wait);
+	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
@@ -598,7 +731,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 /**
  * i915_wait_request - wait until execution of request has finished
  * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: how to wait
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  * @rps: client to charge for RPS boosting
  *
@@ -613,17 +746,22 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
  * errno with remaining time filled in timeout argument.
  */
 int i915_wait_request(struct drm_i915_gem_request *req,
-		      bool interruptible,
+		      unsigned int flags,
 		      s64 *timeout,
 		      struct intel_rps_client *rps)
 {
-	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(reset);
 	struct intel_wait wait;
 	unsigned long timeout_remain;
 	int ret = 0;
 
 	might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+	GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+		   !!(flags & I915_WAIT_LOCKED));
+#endif
 
 	if (i915_gem_request_completed(req))
 		return 0;
@@ -666,7 +804,8 @@ int i915_wait_request(struct drm_i915_gem_request *req,
 		goto complete;
 
 	set_current_state(state);
-	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+	if (flags & I915_WAIT_LOCKED)
+		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
 	intel_wait_init(&wait, req->fence.seqno);
 	if (intel_engine_add_wait(req->engine, &wait))
@@ -702,14 +841,35 @@ wakeup:
 		if (__i915_request_irq_complete(req))
 			break;
 
+		/* If the GPU is hung, and we hold the lock, reset the GPU
+		 * and then check for completion. On a full reset, the engine's
+		 * HW seqno will be advanced past us and we are complete.
+		 * If we do a partial reset, we have to wait for the GPU to
+		 * resume and update the breadcrumb.
+		 *
+		 * If we don't hold the mutex, we can just wait for the worker
+		 * to come along and update the breadcrumb (either directly
+		 * itself, or indirectly by recovering the GPU).
+		 */
+		if (flags & I915_WAIT_LOCKED &&
+		    i915_reset_in_progress(&req->i915->gpu_error)) {
+			__set_current_state(TASK_RUNNING);
+			i915_reset(req->i915);
+			reset_wait_queue(&req->i915->gpu_error.wait_queue,
+					 &reset);
+			continue;
+		}
+
 		/* Only spin if we know the GPU is processing this request */
 		if (i915_spin_request(req, state, 2))
 			break;
 	}
-	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
 	intel_engine_remove_wait(req->engine, &wait);
+	if (flags & I915_WAIT_LOCKED)
+		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 	__set_current_state(TASK_RUNNING);
+
 complete:
 	trace_i915_gem_request_wait_end(req);
 
@@ -749,21 +909,24 @@ complete:
 	return ret;
 }
 
-static void engine_retire_requests(struct intel_engine_cs *engine)
+static bool engine_retire_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request, *next;
 
 	list_for_each_entry_safe(request, next, &engine->request_list, link) {
 		if (!i915_gem_request_completed(request))
-			break;
+			return false;
 
 		i915_gem_request_retire(request);
 	}
+
+	return true;
 }
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	unsigned int tmp;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
@@ -772,11 +935,9 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
-	for_each_engine(engine, dev_priv) {
-		engine_retire_requests(engine);
-		if (!intel_engine_is_active(engine))
+	for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
+		if (engine_retire_requests(engine))
 			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-	}
 
 	if (dev_priv->gt.active_engines == 0)
 		queue_delayed_work(dev_priv->wq,

+ 44 - 32
drivers/gpu/drm/i915/i915_gem_request.h

@@ -28,6 +28,7 @@
 #include <linux/fence.h>
 
 #include "i915_gem.h"
+#include "i915_sw_fence.h"
 
 struct intel_wait {
 	struct rb_node node;
@@ -82,26 +83,32 @@ struct drm_i915_gem_request {
 	struct intel_ring *ring;
 	struct intel_signal_node signaling;
 
+	struct i915_sw_fence submit;
+	wait_queue_t submitq;
+
 	/** GEM sequence number associated with the previous request,
 	 * when the HWS breadcrumb is equal to this the GPU is processing
 	 * this request.
 	 */
 	u32 previous_seqno;
 
-	/** Position in the ringbuffer of the start of the request */
+	/** Position in the ring of the start of the request */
 	u32 head;
 
 	/**
-	 * Position in the ringbuffer of the start of the postfix.
-	 * This is required to calculate the maximum available ringbuffer
-	 * space without overwriting the postfix.
+	 * Position in the ring of the start of the postfix.
+	 * This is required to calculate the maximum available ring space
+	 * without overwriting the postfix.
 	 */
 	u32 postfix;
 
-	/** Position in the ringbuffer of the end of the whole request */
+	/** Position in the ring of the end of the whole request */
 	u32 tail;
 
-	/** Preallocate space in the ringbuffer for the emitting the request */
+	/** Position in the ring of the end of any workarounds after the tail */
+	u32 wa_tail;
+
+	/** Preallocate space in the ring for emitting the request */
 	u32 reserved_space;
 
 	/**
@@ -134,27 +141,8 @@ struct drm_i915_gem_request {
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 
-	/**
-	 * The ELSP only accepts two elements at a time, so we queue
-	 * context/tail pairs on a given queue (ring->execlist_queue) until the
-	 * hardware is available. The queue serves a double purpose: we also use
-	 * it to keep track of the up to 2 contexts currently in the hardware
-	 * (usually one in execution and the other queued up by the GPU): We
-	 * only remove elements from the head of the queue when the hardware
-	 * informs us that an element has been completed.
-	 *
-	 * All accesses to the queue are mediated by a spinlock
-	 * (ring->execlist_lock).
-	 */
-
-	/** Execlist link in the submission queue.*/
+	/** Link in the execlist submission queue, guarded by execlist_lock. */
 	struct list_head execlist_link;
-
-	/** Execlists no. of times this request has been sent to the ELSP */
-	int elsp_submitted;
-
-	/** Execlists context hardware id. */
-	unsigned int ctx_hw_id;
 };
 
 extern const struct fence_ops i915_fence_ops;
@@ -222,6 +210,11 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 	*pdst = src;
 }
 
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+			      struct drm_i915_gem_object *obj,
+			      bool write);
+
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
 	__i915_add_request(req, true)
@@ -234,10 +227,12 @@ struct intel_rps_client;
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
 int i915_wait_request(struct drm_i915_gem_request *req,
-		      bool interruptible,
+		      unsigned int flags,
 		      s64 *timeout,
 		      struct intel_rps_client *rps)
 	__attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE	BIT(0)
+#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
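
Call sites now express the old boolean arguments through these bits; two hedged examples mirroring the conversions elsewhere in this patch:

	/* Locked, interruptible wait: handles a GPU reset under struct_mutex. */
	ret = i915_wait_request(req,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				NULL, NULL);

	/* Unlocked, uninterruptible wait, e.g. from the userptr worker. */
	ret = i915_wait_request(req, 0, NULL, NULL);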
 
@@ -472,6 +467,19 @@ __i915_gem_active_get_rcu(const struct i915_gem_active *active)
 		if (!request || i915_gem_request_completed(request))
 			return NULL;
 
+		/* An especially silly compiler could decide to recompute the
+		 * result of i915_gem_request_completed, more specifically
+		 * re-emit the load for request->fence.seqno. A race would catch
+		 * a later seqno value, which could flip the result from true to
+		 * false. Which means part of the instructions below might not
+	 * false. That would mean some of the instructions below get
+	 * skipped while later ones still execute. Due to
+		 * past the call to i915_gem_request_get_rcu, but not executing
+		 * that while still executing i915_gem_request_put() creates
+		 * havoc enough.  Prevent this with a compiler barrier.
+		 */
+		barrier();
+
 		request = i915_gem_request_get_rcu(request);
 
 		/* What stops the following rcu_access_pointer() from occurring
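
The hazard the compiler-barrier comment above describes can be shown in isolation (illustration only, not driver code; the helper names are stand-ins):

	if (!seqno_passed(READ_ONCE(*hws_seqno), req_seqno))	/* assumed names */
		return NULL;
	barrier();	/* forbid re-deriving the test from a fresh load of the
			 * seqno; the code below must agree with the branch above */
	ref = request_get_rcu(request);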
@@ -578,13 +586,15 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 	if (!request)
 		return 0;
 
-	return i915_wait_request(request, true, NULL, NULL);
+	return i915_wait_request(request,
+				 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				 NULL, NULL);
 }
 
 /**
  * i915_gem_active_wait_unlocked - waits until the request is completed
  * @active - the active request on which to wait
- * @interruptible - whether the wait can be woken by a userspace signal
+ * @flags - how to wait
  * @timeout - how long to wait at most
  * @rps - userspace client to charge for a waitboost
  *
@@ -605,7 +615,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
  */
 static inline int
 i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
-			      bool interruptible,
+			      unsigned int flags,
 			      s64 *timeout,
 			      struct intel_rps_client *rps)
 {
@@ -614,7 +624,7 @@ i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
 
 	request = i915_gem_active_get_unlocked(active);
 	if (request) {
-		ret = i915_wait_request(request, interruptible, timeout, rps);
+		ret = i915_wait_request(request, flags, timeout, rps);
 		i915_gem_request_put(request);
 	}
 
@@ -641,7 +651,9 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	if (!request)
 		return 0;
 
-	ret = i915_wait_request(request, true, NULL, NULL);
+	ret = i915_wait_request(request,
+				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				NULL, NULL);
 	if (ret)
 		return ret;
 

+ 2 - 2
drivers/gpu/drm/i915/i915_gem_shrinker.c

@@ -323,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
 	do {
-		if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
+		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
 		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
 			break;
 
@@ -414,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv, false);
+	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
 	if (ret)
 		goto out;
 

+ 10 - 9
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -92,6 +92,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct resource *r;
 	u32 base;
@@ -111,7 +112,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 3) {
 		u32 bsm;
 
-		pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+		pci_read_config_dword(pdev, INTEL_BSM, &bsm);
 
 		base = bsm & INTEL_BSM_MASK;
 	} else if (IS_I865G(dev)) {
@@ -119,7 +120,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u16 toud = 0;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I845_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -133,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 			}
 		}
 
-		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
 					 I865_TOUD, &toud);
 
 		base = (toud << 16) + tseg_size;
@@ -142,13 +143,13 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I85X_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE)
 			tseg_size = MB(1);
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
 					 I85X_DRB3, &tmp);
 		tom = tmp * MB(32);
 
@@ -158,7 +159,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I845_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -172,7 +173,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 			}
 		}
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
@@ -182,7 +183,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		u32 tom;
 		u8 tmp;
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_ESMRAMC, &tmp);
 
 		if (tmp & TSEG_ENABLE) {
@@ -192,7 +193,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 				tseg_size = KB(512);
 		}
 
-		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_userptr.c

@@ -68,7 +68,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 
 	for_each_active(active, idx)
 		i915_gem_active_wait_unlocked(&obj->last_read[idx],
-					      false, NULL, NULL);
+					      0, NULL, NULL);
 }
 
 static void cancel_userptr(struct work_struct *work)

+ 26 - 10
drivers/gpu/drm/i915/i915_gpu_error.c

@@ -336,6 +336,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 {
 	struct drm_device *dev = error_priv->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_i915_error_state *error = error_priv->error;
 	struct drm_i915_error_object *obj;
 	int i, j, offset, elt;
@@ -367,11 +368,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	}
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
-	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
+	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
 	err_printf(m, "PCI Subsystem: %04x:%04x\n",
-		   dev->pdev->subsystem_vendor,
-		   dev->pdev->subsystem_device);
+		   pdev->subsystem_vendor,
+		   pdev->subsystem_device);
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
 	if (HAS_CSR(dev)) {
@@ -488,7 +489,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			}
 		}
 
-		if (ee->num_waiters) {
+		if (IS_ERR(ee->waiters)) {
+			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
+				   dev_priv->engine[i].name);
+		} else if (ee->num_waiters) {
 			err_printf(m, "%s --- %d waiters\n",
 				   dev_priv->engine[i].name,
 				   ee->num_waiters);
@@ -647,7 +651,8 @@ static void i915_error_state_free(struct kref *error_ref)
 		i915_error_object_free(ee->wa_ctx);
 
 		kfree(ee->requests);
-		kfree(ee->waiters);
+		if (!IS_ERR_OR_NULL(ee->waiters))
+			kfree(ee->waiters);
 	}
 
 	i915_error_object_free(error->semaphore);
@@ -932,7 +937,14 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
 	ee->num_waiters = 0;
 	ee->waiters = NULL;
 
-	spin_lock(&b->lock);
+	if (RB_EMPTY_ROOT(&b->waiters))
+		return;
+
+	if (!spin_trylock(&b->lock)) {
+		ee->waiters = ERR_PTR(-EDEADLK);
+		return;
+	}
+
 	count = 0;
 	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
 		count++;
@@ -946,9 +958,13 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
 	if (!waiter)
 		return;
 
-	ee->waiters = waiter;
+	if (!spin_trylock(&b->lock)) {
+		kfree(waiter);
+		ee->waiters = ERR_PTR(-EDEADLK);
+		return;
+	}
 
-	spin_lock(&b->lock);
+	ee->waiters = waiter;
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = container_of(rb, typeof(*w), node);
 
@@ -1009,7 +1025,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 	if (INTEL_GEN(dev_priv) > 2)
 		ee->mode = I915_READ_MODE(engine);
 
-	if (I915_NEED_GFX_HWS(dev_priv)) {
+	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
 		i915_reg_t mmio;
 
 		if (IS_GEN7(dev_priv)) {

+ 0 - 3
drivers/gpu/drm/i915/i915_guc_reg.h

@@ -103,9 +103,6 @@
 #define HOST2GUC_INTERRUPT		_MMIO(0xc4c8)
 #define   HOST2GUC_TRIGGER		  (1<<0)
 
-#define DRBMISC1			0x1984
-#define   DOORBELL_ENABLE		  (1<<0)
-
 #define GEN8_DRBREGL(x)			_MMIO(0x1000 + (x) * 8)
 #define   GEN8_DRB_VALID		  (1<<0)
 #define GEN8_DRBREGU(x)			_MMIO(0x1000 + (x) * 8 + 4)

+ 71 - 55
drivers/gpu/drm/i915/i915_guc_submission.c

@@ -59,7 +59,7 @@
  * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into each Work Item.
- * See guc_add_workqueue_item()
+ * See guc_wq_item_append()
  *
  */
 
@@ -114,10 +114,8 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 		if (ret != -ETIMEDOUT)
 			ret = -EIO;
 
-		DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
-				"status=0x%08X response=0x%08X\n",
-				data[0], ret, status,
-				I915_READ(SOFT_SCRATCH(15)));
+		DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
+			 data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
 
 		dev_priv->guc.action_fail += 1;
 		dev_priv->guc.action_err = ret;
@@ -290,7 +288,7 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
-static void guc_init_proc_desc(struct intel_guc *guc,
+static void guc_proc_desc_init(struct intel_guc *guc,
 			       struct i915_guc_client *client)
 {
 	struct guc_process_desc *desc;
@@ -322,7 +320,7 @@ static void guc_init_proc_desc(struct intel_guc *guc,
  * write queue, etc).
  */
 
-static void guc_init_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_init(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -330,6 +328,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	struct i915_gem_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
+	unsigned int tmp;
 	u32 gfx_addr;
 
 	memset(&desc, 0, sizeof(desc));
@@ -339,7 +338,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_engine_masked(engine, dev_priv, client->engines) {
+	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
 		struct intel_context *ce = &ctx->engine[engine->id];
 		uint32_t guc_engine_id = engine->guc_id;
 		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
@@ -400,7 +399,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 			     sizeof(desc) * client->ctx_index);
 }
 
-static void guc_fini_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_fini(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct guc_context_desc desc;
@@ -414,7 +413,7 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 }
 
 /**
- * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
  * @request:	request associated with the commands
  *
  * Return:	0 if space is available
@@ -422,35 +421,39 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
  *
  * This function must be called (and must return 0) before a request
  * is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it remains valid until (but only until)
- * the next call to submit().
+ * of 0 has been returned, it must be balanced by a corresponding
+ * call to submit().
  *
- * This precheck allows the caller to determine in advance that space
+ * Reservation allows the caller to determine in advance that space
  * will be available for the next submission before committing resources
  * to it, and helps avoid late failures with complicated recovery paths.
  */
-int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
+int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
 {
 	const size_t wqi_size = sizeof(struct guc_wq_item);
 	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
-	struct guc_process_desc *desc;
+	struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
 	u32 freespace;
+	int ret;
 
-	GEM_BUG_ON(gc == NULL);
-
-	desc = gc->client_base + gc->proc_desc_offset;
-
+	spin_lock(&gc->wq_lock);
 	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
-	if (likely(freespace >= wqi_size))
-		return 0;
-
-	gc->no_wq_space += 1;
+	freespace -= gc->wq_rsvd;
+	if (likely(freespace >= wqi_size)) {
+		gc->wq_rsvd += wqi_size;
+		ret = 0;
+	} else {
+		gc->no_wq_space++;
+		ret = -EAGAIN;
+	}
+	spin_unlock(&gc->wq_lock);
 
-	return -EAGAIN;
+	return ret;
 }
 
-static void guc_add_workqueue_item(struct i915_guc_client *gc,
-				   struct drm_i915_gem_request *rq)
+/* Construct a Work Item and append it to the GuC's Work Queue */
+static void guc_wq_item_append(struct i915_guc_client *gc,
+			       struct drm_i915_gem_request *rq)
 {
 	/* wqi_len is in DWords, and does not include the one-word header */
 	const size_t wqi_size = sizeof(struct guc_wq_item);
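
From the caller's side, the reservation protocol spelled out in the kernel-doc above reduces to the following hedged outline (error paths elided):

	/* Reserve while failure is still cheap to recover from ... */
	err = i915_guc_wq_reserve(request);
	if (err)
		return err;	/* -EAGAIN: no WQ space, back off and retry */

	/* ... then submission later consumes exactly that reservation. */
	i915_guc_submit(request);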
@@ -463,7 +466,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
 
 	desc = gc->client_base + gc->proc_desc_offset;
 
-	/* Free space is guaranteed, see i915_guc_wq_check_space() above */
+	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
 	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
 	GEM_BUG_ON(freespace < wqi_size);
 
@@ -481,12 +484,14 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
 	 * workqueue buffer dw by dw.
 	 */
 	BUILD_BUG_ON(wqi_size != 16);
+	GEM_BUG_ON(gc->wq_rsvd < wqi_size);
 
 	/* postincrement WQ tail for next time */
 	wq_off = gc->wq_tail;
+	GEM_BUG_ON(wq_off & (wqi_size - 1));
 	gc->wq_tail += wqi_size;
 	gc->wq_tail &= gc->wq_size - 1;
-	GEM_BUG_ON(wq_off & (wqi_size - 1));
+	gc->wq_rsvd -= wqi_size;
 
 	/* WQ starts from the page after doorbell / process_desc */
 	wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
@@ -551,8 +556,8 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		if (db_ret.db_status == GUC_DOORBELL_DISABLED)
 			break;
 
-		DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
-			  db_cmp.cookie, db_ret.cookie);
+		DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
+			 db_cmp.cookie, db_ret.cookie);
 
 		/* update the cookie to newly read cookie from GuC */
 		db_cmp.cookie = db_ret.cookie;
@@ -571,14 +576,13 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * Return:	0 on success, otherwise an errno.
  * 		(Note: nonzero really shouldn't happen!)
  *
- * The caller must have already called i915_guc_wq_check_space() above
- * with a result of 0 (success) since the last request submission. This
- * guarantees that there is space in the work queue for the new request,
- * so enqueuing the item cannot fail.
+ * The caller must have already called i915_guc_wq_reserve() above with
+ * a result of 0 (success), guaranteeing that there is space in the work
+ * queue for the new request, so enqueuing the item cannot fail.
  *
  * Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when check() says there's no space, or calls submit() multiple
- * times with no intervening check().
+ * submit() when _reserve() says there's no space, or calls _submit()
+ * a different number of times from (successful) calls to _reserve().
  *
  * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
@@ -590,7 +594,8 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 	struct i915_guc_client *client = guc->execbuf_client;
 	int b_ret;
 
-	guc_add_workqueue_item(client, rq);
+	spin_lock(&client->wq_lock);
+	guc_wq_item_append(client, rq);
 	b_ret = guc_ring_doorbell(client);
 
 	client->submissions[engine_id] += 1;
@@ -600,6 +605,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->fence.seqno;
+	spin_unlock(&client->wq_lock);
 }
 
 /*
@@ -680,7 +686,7 @@ guc_client_free(struct drm_i915_private *dev_priv,
 	i915_vma_unpin_and_release(&client->vma);
 
 	if (client->ctx_index != GUC_INVALID_CTX_ID) {
-		guc_fini_ctx_desc(guc, client);
+		guc_ctx_desc_fini(guc, client);
 		ida_simple_remove(&guc->ctx_ids, client->ctx_index);
 	}
 
@@ -733,8 +739,8 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
 	/* Restore to original value */
 	err = guc_update_doorbell_id(guc, client, db_id);
 	if (err)
-		DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
-			db_id, err);
+		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
+			 db_id, err);
 
 	/* Read back & verify all doorbell registers */
 	for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
@@ -790,6 +796,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
 	client->vma = vma;
 	client->client_base = kmap(i915_vma_first_page(vma));
+
+	spin_lock_init(&client->wq_lock);
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
 
@@ -810,8 +818,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	else
 		client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-	guc_init_proc_desc(guc, client);
-	guc_init_ctx_desc(guc, client);
+	guc_proc_desc_init(guc, client);
+	guc_ctx_desc_init(guc, client);
 	if (guc_init_doorbell(guc, client, db_id))
 		goto err;
 
@@ -823,13 +831,11 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	return client;
 
 err:
-	DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
-
 	guc_client_free(dev_priv, client);
 	return NULL;
 }
 
-static void guc_create_log(struct intel_guc *guc)
+static void guc_log_create(struct intel_guc *guc)
 {
 	struct i915_vma *vma;
 	unsigned long offset;
@@ -869,7 +875,7 @@ static void guc_create_log(struct intel_guc *guc)
 	guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
-static void init_guc_policies(struct guc_policies *policies)
+static void guc_policies_init(struct guc_policies *policies)
 {
 	struct guc_policy *policy;
 	u32 p, i;
@@ -891,7 +897,7 @@ static void init_guc_policies(struct guc_policies *policies)
 	policies->is_valid = 1;
 }
 
-static void guc_create_ads(struct intel_guc *guc)
+static void guc_addon_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct i915_vma *vma;
@@ -934,7 +940,7 @@ static void guc_create_ads(struct intel_guc *guc)
 
 	/* GuC scheduling policies */
 	policies = (void *)ads + sizeof(struct guc_ads);
-	init_guc_policies(policies);
+	guc_policies_init(policies);
 
 	ads->scheduler_policies =
 		i915_ggtt_offset(vma) + sizeof(struct guc_ads);
@@ -965,9 +971,11 @@ static void guc_create_ads(struct intel_guc *guc)
  */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 {
+	const size_t ctxsize = sizeof(struct guc_context_desc);
+	const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
+	const size_t gemsize = round_up(poolsize, PAGE_SIZE);
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_vma *vma;
-	u32 size;
 
 	/* Wipe bitmap & delete client in case of reinitialisation */
 	bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
@@ -979,15 +987,14 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 	if (guc->ctx_pool_vma)
 		return 0; /* already allocated */
 
-	size = PAGE_ALIGN(GUC_MAX_GPU_CONTEXTS*sizeof(struct guc_context_desc));
-	vma = guc_allocate_vma(guc, size);
+	vma = guc_allocate_vma(guc, gemsize);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
 	guc->ctx_pool_vma = vma;
 	ida_init(&guc->ctx_ids);
-	guc_create_log(guc);
-	guc_create_ads(guc);
+	guc_log_create(guc);
+	guc_addon_create(guc);
 
 	return 0;
 }
@@ -997,6 +1004,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client;
 	struct intel_engine_cs *engine;
+	struct drm_i915_gem_request *request;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
@@ -1004,7 +1012,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 				  GUC_CTX_PRIORITY_KMD_NORMAL,
 				  dev_priv->kernel_context);
 	if (!client) {
-		DRM_ERROR("Failed to create execbuf guc_client\n");
+		DRM_ERROR("Failed to create normal GuC client!\n");
 		return -ENOMEM;
 	}
 
@@ -1013,9 +1021,17 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	guc_init_doorbell_hw(guc);
 
 	/* Take over from manual control of ELSP (execlists) */
-	for_each_engine(engine, dev_priv)
+	for_each_engine(engine, dev_priv) {
 		engine->submit_request = i915_guc_submit;
 
+		/* Replay the current set of previously submitted requests */
+		list_for_each_entry(request, &engine->request_list, link) {
+			client->wq_rsvd += sizeof(struct guc_wq_item);
+			if (i915_sw_fence_done(&request->submit))
+				i915_guc_submit(request);
+		}
+	}
+
 	return 0;
 }
 

+ 62 - 64
drivers/gpu/drm/i915/i915_irq.c

@@ -371,7 +371,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.interrupts_enabled = false;
 
-	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -2497,57 +2497,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-	int ret;
 
 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
+	DRM_DEBUG_DRIVER("resetting chip\n");
+	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
 	/*
-	 * Note that there's only one work item which does gpu resets, so we
-	 * need not worry about concurrent gpu resets potentially incrementing
-	 * error->reset_counter twice. We only need to take care of another
-	 * racing irq/hangcheck declaring the gpu dead for a second time. A
-	 * quick check for that is good enough: schedule_work ensures the
-	 * correct ordering between hang detection and this work item, and since
-	 * the reset in-progress bit is only ever set by code outside of this
-	 * work we don't need to worry about any other races.
+	 * In most cases it's guaranteed that we get here with an RPM
+	 * reference held, for example because there is a pending GPU
+	 * request that won't finish until the reset is done. This
+	 * isn't the case at least when we get here by doing a
+	 * simulated reset via debugfs, so get an RPM reference.
 	 */
-	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
-		/*
-		 * In most cases it's guaranteed that we get here with an RPM
-		 * reference held, for example because there is a pending GPU
-		 * request that won't finish until the reset is done. This
-		 * isn't the case at least when we get here by doing a
-	 * simulated reset via debugfs, so get an RPM reference.
-		 */
-		intel_runtime_pm_get(dev_priv);
-
-		intel_prepare_reset(dev_priv);
+	intel_runtime_pm_get(dev_priv);
+	intel_prepare_reset(dev_priv);
 
+	do {
 		/*
 		 * All state reset _must_ be completed before we update the
 		 * reset counter, for otherwise waiters might miss the reset
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-		ret = i915_reset(dev_priv);
+		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+			i915_reset(dev_priv);
+			mutex_unlock(&dev_priv->drm.struct_mutex);
+		}
 
-		intel_finish_reset(dev_priv);
+		/* We need to wait for anyone holding the lock to wake up */
+	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+				     I915_RESET_IN_PROGRESS,
+				     TASK_UNINTERRUPTIBLE,
+				     HZ));
 
-		intel_runtime_pm_put(dev_priv);
+	intel_finish_reset(dev_priv);
+	intel_runtime_pm_put(dev_priv);
 
-		if (ret == 0)
-			kobject_uevent_env(kobj,
-					   KOBJ_CHANGE, reset_done_event);
+	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+		kobject_uevent_env(kobj,
+				   KOBJ_CHANGE, reset_done_event);
 
-		/*
-		 * Note: The wake_up also serves as a memory barrier so that
-		 * waiters see the update value of the reset counter atomic_t.
-		 */
-		wake_up_all(&dev_priv->gpu_error.reset_queue);
-	}
+	/*
+	 * Note: The wake_up also serves as a memory barrier so that
+	 * waiters see the updated value of dev_priv->gpu_error.flags.
+	 */
+	wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
 
 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
@@ -2666,25 +2661,26 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
 	i915_report_and_clear_eir(dev_priv);
 
-	if (engine_mask) {
-		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
-				&dev_priv->gpu_error.reset_counter);
+	if (!engine_mask)
+		return;
+
+	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+			     &dev_priv->gpu_error.flags))
+		return;
 
-		/*
-		 * Wakeup waiting processes so that the reset function
-		 * i915_reset_and_wakeup doesn't deadlock trying to grab
-		 * various locks. By bumping the reset counter first, the woken
-		 * processes will see a reset in progress and back off,
-		 * releasing their locks and then wait for the reset completion.
-		 * We must do this for _all_ gpu waiters that might hold locks
-		 * that the reset work needs to acquire.
-		 *
-		 * Note: The wake_up serves as the required memory barrier to
-		 * ensure that the waiters see the updated value of the reset
-		 * counter atomic_t.
-		 */
-		i915_error_wake_up(dev_priv);
-	}
+	/*
+	 * Wakeup waiting processes so that the reset function
+	 * various locks. By setting the reset-in-progress flag first, the
+	 * woken processes will see a reset in progress and back off,
+	 * releasing their locks and then waiting for the reset to complete.
+	 * releasing their locks and then wait for the reset completion.
+	 * We must do this for _all_ gpu waiters that might hold locks
+	 * that the reset work needs to acquire.
+	 *
+	 * Note: The wake_up also provides a memory barrier to ensure that the
+	 * waiters see the updated value of the reset flags.
+	 */
+	i915_error_wake_up(dev_priv);
 
 	i915_reset_and_wakeup(dev_priv);
 }
@@ -2835,10 +2831,10 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 		}
 	}
 
-	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
-		  engine->id, ipehr, offset);
+	DRM_DEBUG_DRIVER("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+			 engine->id, ipehr, offset);
 
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static struct intel_engine_cs *
@@ -2926,6 +2922,9 @@ static int semaphore_passed(struct intel_engine_cs *engine)
 	if (signaller == NULL)
 		return -1;
 
+	if (IS_ERR(signaller))
+		return 0;
+
 	/* Prevent pathological recursion due to driver bugs */
 	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
 		return -1;
@@ -3079,6 +3078,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		bool busy = intel_engine_has_waiter(engine);
 		u64 acthd;
 		u32 seqno;
+		u32 submit;
 
 		semaphore_clear_deadlocks(dev_priv);
 
@@ -3094,14 +3094,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
 		acthd = intel_engine_get_active_head(engine);
 		seqno = intel_engine_get_seqno(engine);
+		submit = READ_ONCE(engine->last_submitted_seqno);
 
 		if (engine->hangcheck.seqno == seqno) {
-			if (!intel_engine_is_active(engine)) {
+			if (i915_seqno_passed(seqno, submit)) {
 				engine->hangcheck.action = HANGCHECK_IDLE;
-				if (busy) {
-					/* Safeguard against driver failure */
-					engine->hangcheck.score += BUSY;
-				}
 			} else {
 				/* We always increment the hangcheck score
 				 * if the engine is busy and still processing
@@ -3167,6 +3164,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 
 	if (hung) {
 		char msg[80];
+		unsigned int tmp;
 		int len;
 
 		/* If some rings hung but others were still busy, only
@@ -3176,7 +3174,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 			hung &= ~stuck;
 		len = scnprintf(msg, sizeof(msg),
 				"%s on ", stuck == hung ? "No progress" : "Hang");
-		for_each_engine_masked(engine, dev_priv, hung)
+		for_each_engine_masked(engine, dev_priv, hung, tmp)
 			len += scnprintf(msg + len, sizeof(msg) - len,
 					 "%s, ", engine->name);
 		msg[len-2] = '\0';
@@ -4502,7 +4500,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	if (INTEL_INFO(dev_priv)->gen >= 8)
-		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
 
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);

+ 156 - 134
drivers/gpu/drm/i915/i915_pci.c

@@ -54,208 +54,216 @@
 #define CHV_COLORS \
 	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
 
+#define GEN2_FEATURES \
+	.gen = 2, .num_pipes = 1, \
+	.has_overlay = 1, .overlay_needs_physical = 1, \
+	.has_gmch_display = 1, \
+	.hws_needs_physical = 1, \
+	.ring_mask = RENDER_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN2_FEATURES,
+	.is_mobile = 1, .cursor_needs_physical = 1,
+	.num_pipes = 2, /* legal, last one wins */
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2, .num_pipes = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN2_FEATURES,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+	GEN2_FEATURES,
+	.is_i85x = 1, .is_mobile = 1,
+	.num_pipes = 2, /* legal, last one wins */
 	.cursor_needs_physical = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
 	.has_fbc = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
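
The "legal, last one wins" comments rely on C99 designated-initialiser semantics: when the same member is initialised twice, the last initialiser takes effect (GCC diagnoses this only under -Woverride-init). A standalone illustration, independent of the i915 structs:

	struct demo { int gen, num_pipes; };

	#define GEN2_DEMO .gen = 2, .num_pipes = 1

	static const struct demo d = {
		GEN2_DEMO,
		.num_pipes = 2,	/* overrides the macro's 1; d.num_pipes == 2 */
	};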
 
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2, .num_pipes = 1,
-	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN2_FEATURES,
 };
 
+#define GEN3_FEATURES \
+	.gen = 3, .num_pipes = 2, \
+	.has_gmch_display = 1, \
+	.ring_mask = RENDER_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.is_i915g = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_mobile = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.is_mobile = 1,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 	.has_fbc = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+	GEN3_FEATURES,
+	.is_i945gm = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
 	.has_fbc = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 
+#define GEN4_FEATURES \
+	.gen = 4, .num_pipes = 2, \
+	.has_hotplug = 1, \
+	.has_gmch_display = 1, \
+	.ring_mask = RENDER_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
-	.has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_broadwater = 1,
 	.has_overlay = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1, .num_pipes = 2,
-	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_crestline = 1,
+	.is_mobile = 1, .has_fbc = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	.hws_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	GEN3_FEATURES,
+	.is_g33 = 1,
+	.has_hotplug = 1,
 	.has_overlay = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
-	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_g4x = 1,
+	.has_pipe_cxsr = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_g4x = 1, .num_pipes = 2,
-	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
-	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	GEN4_FEATURES,
+	.is_g4x = 1,
+	.is_mobile = 1, .has_fbc = 1,
+	.has_pipe_cxsr = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	GEN3_FEATURES,
+	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+	.has_hotplug = 1,
 	.has_overlay = 1,
-	.ring_mask = RENDER_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
 };
 
+#define GEN5_FEATURES \
+	.gen = 5, .num_pipes = 2, \
+	.has_hotplug = 1, \
+	.has_gmbus_irq = 1, \
+	.ring_mask = RENDER_RING | BSD_RING, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN5_FEATURES,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_mobile = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 1,
-	.ring_mask = RENDER_RING | BSD_RING,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN5_FEATURES,
+	.is_mobile = 1,
 };
 
+#define GEN6_FEATURES \
+	.gen = 6, .num_pipes = 2, \
+	.has_hotplug = 1, \
+	.has_fbc = 1, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1, \
+	.has_rc6 = 1, \
+	.has_rc6p = 1, \
+	.has_gmbus_irq = 1, \
+	.has_hw_contexts = 1, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-	.has_llc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN6_FEATURES,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_mobile = 1, .num_pipes = 2,
-	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_fbc = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-	.has_llc = 1,
-	GEN_DEFAULT_PIPEOFFSETS,
-	CURSOR_OFFSETS,
+	GEN6_FEATURES,
+	.is_mobile = 1,
 };
 
 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
-	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_hotplug = 1, \
 	.has_fbc = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.has_llc = 1, \
+	.has_rc6 = 1, \
+	.has_rc6p = 1, \
+	.has_gmbus_irq = 1, \
+	.has_hw_contexts = 1, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	IVB_CURSOR_OFFSETS
 
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
+	.has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
+	.has_l3_dpf = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.num_pipes = 0, /* legal, last one wins */
+	.has_l3_dpf = 1,
 };
 
 #define VLV_FEATURES  \
 	.gen = 7, .num_pipes = 2, \
-	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_psr = 1, \
+	.has_runtime_pm = 1, \
+	.has_rc6 = 1, \
+	.has_gmbus_irq = 1, \
+	.has_hw_contexts = 1, \
+	.has_gmch_display = 1, \
+	.has_hotplug = 1, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
 	.display_mmio_offset = VLV_DISPLAY_BASE, \
 	GEN_DEFAULT_PIPEOFFSETS, \
 	CURSOR_OFFSETS
 
-static const struct intel_device_info intel_valleyview_m_info = {
-	VLV_FEATURES,
-	.is_valleyview = 1,
-	.is_mobile = 1,
-};
-
-static const struct intel_device_info intel_valleyview_d_info = {
+static const struct intel_device_info intel_valleyview_info = {
 	VLV_FEATURES,
 	.is_valleyview = 1,
 };
@@ -264,54 +272,50 @@ static const struct intel_device_info intel_valleyview_d_info = {
 	GEN7_FEATURES, \
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
 	.has_ddi = 1, \
-	.has_fpga_dbg = 1
-
-static const struct intel_device_info intel_haswell_d_info = {
-	HSW_FEATURES,
-	.is_haswell = 1,
-};
-
-static const struct intel_device_info intel_haswell_m_info = {
+	.has_fpga_dbg = 1, \
+	.has_psr = 1, \
+	.has_resource_streamer = 1, \
+	.has_dp_mst = 1, \
+	.has_rc6p = 0 /* RC6p removed by HSW */, \
+	.has_runtime_pm = 1
+
+static const struct intel_device_info intel_haswell_info = {
 	HSW_FEATURES,
 	.is_haswell = 1,
-	.is_mobile = 1,
+	.has_l3_dpf = 1,
 };
 
 #define BDW_FEATURES \
 	HSW_FEATURES, \
-	BDW_COLORS
+	BDW_COLORS, \
+	.has_logical_ring_contexts = 1
 
-static const struct intel_device_info intel_broadwell_d_info = {
+static const struct intel_device_info intel_broadwell_info = {
 	BDW_FEATURES,
 	.gen = 8,
 	.is_broadwell = 1,
 };
 
-static const struct intel_device_info intel_broadwell_m_info = {
-	BDW_FEATURES,
-	.gen = 8, .is_mobile = 1,
-	.is_broadwell = 1,
-};
-
-static const struct intel_device_info intel_broadwell_gt3d_info = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
 	BDW_FEATURES,
 	.gen = 8,
 	.is_broadwell = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
-static const struct intel_device_info intel_broadwell_gt3m_info = {
-	BDW_FEATURES,
-	.gen = 8, .is_mobile = 1,
-	.is_broadwell = 1,
-	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
-
 static const struct intel_device_info intel_cherryview_info = {
 	.gen = 8, .num_pipes = 3,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.is_cherryview = 1,
+	.has_psr = 1,
+	.has_runtime_pm = 1,
+	.has_resource_streamer = 1,
+	.has_rc6 = 1,
+	.has_gmbus_irq = 1,
+	.has_hw_contexts = 1,
+	.has_logical_ring_contexts = 1,
+	.has_gmch_display = 1,
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	GEN_CHV_PIPEOFFSETS,
 	CURSOR_OFFSETS,
@@ -322,25 +326,41 @@ static const struct intel_device_info intel_skylake_info = {
 	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
 	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broxton_info = {
 	.is_broxton = 1,
 	.gen = 9,
-	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.num_pipes = 3,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
+	.has_runtime_pm = 1,
 	.has_pooled_eu = 0,
+	.has_csr = 1,
+	.has_resource_streamer = 1,
+	.has_rc6 = 1,
+	.has_dp_mst = 1,
+	.has_gmbus_irq = 1,
+	.has_hw_contexts = 1,
+	.has_logical_ring_contexts = 1,
+	.has_guc = 1,
+	.ddb_size = 512,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
 	BDW_COLORS,
@@ -350,12 +370,18 @@ static const struct intel_device_info intel_kabylake_info = {
 	BDW_FEATURES,
 	.is_kabylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
 	BDW_FEATURES,
 	.is_kabylake = 1,
 	.gen = 9,
+	.has_csr = 1,
+	.has_guc = 1,
+	.ddb_size = 896,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
@@ -387,14 +413,10 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
 	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
 	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-	INTEL_HSW_D_IDS(&intel_haswell_d_info),
-	INTEL_HSW_M_IDS(&intel_haswell_m_info),
-	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+	INTEL_HSW_IDS(&intel_haswell_info),
+	INTEL_VLV_IDS(&intel_valleyview_info),
+	INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+	INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
 	INTEL_CHV_IDS(&intel_cherryview_info),
 	INTEL_SKL_GT1_IDS(&intel_skylake_info),
 	INTEL_SKL_GT2_IDS(&intel_skylake_info),
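
The GENx_FEATURES consolidation above leans on a C99 guarantee: when a designated initializer names the same member more than once, the last occurrence wins (C99 6.7.8), which is exactly what the /* legal, last one wins */ overrides rely on. A standalone sketch of the idiom, with hypothetical names:

	struct info { int gen; int num_pipes; };

	#define GEN2_DEFAULTS .gen = 2, .num_pipes = 1

	static const struct info i830_demo = {
		GEN2_DEFAULTS,
		.num_pipes = 2,	/* overrides the macro's value */
	};
	/* i830_demo.num_pipes == 2; GCC's -Woverride-init (part of -Wextra)
	 * warns about the repeated initializer, but it is well-defined C. */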

+ 14 - 1
drivers/gpu/drm/i915/i915_reg.h

@@ -7067,7 +7067,7 @@ enum {
 #define VLV_RCEDATA				_MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD			_MMIO(0xA0C0)
 #define GEN6_PMINTRMSK				_MMIO(0xA168)
-#define   GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
+#define   GEN8_PMINTR_REDIRECT_TO_GUC		  (1<<31)
 #define GEN8_MISC_CTRL0				_MMIO(0xA180)
 #define VLV_PWRDWNUPCTL				_MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS		_MMIO(0xA0C4)
@@ -7123,6 +7123,15 @@ enum {
 
 #define GEN6_PCODE_MAILBOX			_MMIO(0x138124)
 #define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_PCODE_ERROR_MASK			0xFF
+#define     GEN6_PCODE_SUCCESS			0x0
+#define     GEN6_PCODE_ILLEGAL_CMD		0x1
+#define     GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define     GEN6_PCODE_TIMEOUT			0x3
+#define     GEN6_PCODE_UNIMPLEMENTED_CMD	0xFF
+#define     GEN7_PCODE_TIMEOUT			0x2
+#define     GEN7_PCODE_ILLEGAL_DATA		0x3
+#define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
 #define	  GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
 #define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
@@ -7144,6 +7153,10 @@ enum {
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
 #define   DISPLAY_IPS_CONTROL			0x19
 #define	  HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
+#define   GEN9_PCODE_SAGV_CONTROL		0x21
+#define     GEN9_SAGV_DISABLE			0x0
+#define     GEN9_SAGV_IS_DISABLED		0x1
+#define     GEN9_SAGV_ENABLE			0x3
 #define GEN6_PCODE_DATA				_MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
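
The new GEN6_PCODE_ERROR_MASK definitions let the pcode mailbox routines report why a command failed instead of treating every non-ready state alike. Note that the GEN7_* codes reuse values from the GEN6_* set (0x2 and 0x3 collide), so any decode has to be generation-specific. A minimal sketch of such a decode, with an illustrative errno mapping rather than the driver's own:

	static int gen6_decode_pcode_err(u32 mbox)
	{
		switch (mbox & GEN6_PCODE_ERROR_MASK) {
		case GEN6_PCODE_SUCCESS:
			return 0;
		case GEN6_PCODE_ILLEGAL_CMD:
			return -ENXIO;
		case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
			return -EINVAL;
		case GEN6_PCODE_TIMEOUT:
			return -ETIMEDOUT;
		case GEN6_PCODE_UNIMPLEMENTED_CMD:
			return -ENOSYS;
		default:
			return -EIO;
		}
	}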

+ 4 - 2
drivers/gpu/drm/i915/i915_suspend.c

@@ -63,6 +63,7 @@ static void i915_restore_display(struct drm_device *dev)
 int i915_save_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int i;
 
 	mutex_lock(&dev->struct_mutex);
@@ -70,7 +71,7 @@ int i915_save_state(struct drm_device *dev)
 	i915_save_display(dev);
 
 	if (IS_GEN4(dev))
-		pci_read_config_word(dev->pdev, GCDGMBUS,
+		pci_read_config_word(pdev, GCDGMBUS,
 				     &dev_priv->regfile.saveGCDGMBUS);
 
 	/* Cache mode state */
@@ -108,6 +109,7 @@ int i915_save_state(struct drm_device *dev)
 int i915_restore_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int i;
 
 	mutex_lock(&dev->struct_mutex);
@@ -115,7 +117,7 @@ int i915_restore_state(struct drm_device *dev)
 	i915_gem_restore_fences(dev);
 
 	if (IS_GEN4(dev))
-		pci_write_config_word(dev->pdev, GCDGMBUS,
+		pci_write_config_word(pdev, GCDGMBUS,
 				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev);
 

+ 362 - 0
drivers/gpu/drm/i915/i915_sw_fence.c

@@ -0,0 +1,362 @@
+/*
+ * (C) Copyright 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/slab.h>
+#include <linux/fence.h>
+#include <linux/reservation.h>
+
+#include "i915_sw_fence.h"
+
+static DEFINE_SPINLOCK(i915_sw_fence_lock);
+
+static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
+				  enum i915_sw_fence_notify state)
+{
+	i915_sw_fence_notify_t fn;
+
+	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
+	return fn(fence, state);
+}
+
+static void i915_sw_fence_free(struct kref *kref)
+{
+	struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
+
+	WARN_ON(atomic_read(&fence->pending) > 0);
+
+	if (fence->flags & I915_SW_FENCE_MASK)
+		__i915_sw_fence_notify(fence, FENCE_FREE);
+	else
+		kfree(fence);
+}
+
+static void i915_sw_fence_put(struct i915_sw_fence *fence)
+{
+	kref_put(&fence->kref, i915_sw_fence_free);
+}
+
+static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
+
+static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
+					struct list_head *continuation)
+{
+	wait_queue_head_t *x = &fence->wait;
+	wait_queue_t *pos, *next;
+	unsigned long flags;
+
+	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
+
+	/*
+	 * To prevent unbounded recursion as we traverse the graph of
+	 * i915_sw_fences, we move the task_list from this fence (the next
+	 * ready one) to the tail of the original fence's task_list, so its
+	 * waiters are woken as part of the same pass.
+	 */
+
+	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
+	if (continuation) {
+		list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+			if (pos->func == autoremove_wake_function)
+				pos->func(pos, TASK_NORMAL, 0, continuation);
+			else
+				list_move_tail(&pos->task_list, continuation);
+		}
+	} else {
+		LIST_HEAD(extra);
+
+		do {
+			list_for_each_entry_safe(pos, next,
+						 &x->task_list, task_list)
+				pos->func(pos, TASK_NORMAL, 0, &extra);
+
+			if (list_empty(&extra))
+				break;
+
+			list_splice_tail_init(&extra, &x->task_list);
+		} while (1);
+	}
+	spin_unlock_irqrestore(&x->lock, flags);
+}
+
+static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
+				     struct list_head *continuation)
+{
+	if (!atomic_dec_and_test(&fence->pending))
+		return;
+
+	if (fence->flags & I915_SW_FENCE_MASK &&
+	    __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
+		return;
+
+	__i915_sw_fence_wake_up_all(fence, continuation);
+}
+
+static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+{
+	if (WARN_ON(i915_sw_fence_done(fence)))
+		return;
+
+	__i915_sw_fence_complete(fence, NULL);
+}
+
+static void i915_sw_fence_await(struct i915_sw_fence *fence)
+{
+	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+}
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+{
+	BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+
+	init_waitqueue_head(&fence->wait);
+	kref_init(&fence->kref);
+	atomic_set(&fence->pending, 1);
+	fence->flags = (unsigned long)fn;
+}
+
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+	i915_sw_fence_complete(fence);
+	i915_sw_fence_put(fence);
+}
+
+static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+{
+	list_del(&wq->task_list);
+	__i915_sw_fence_complete(wq->private, key);
+	i915_sw_fence_put(wq->private);
+	return 0;
+}
+
+static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+				    const struct i915_sw_fence * const signaler)
+{
+	wait_queue_t *wq;
+
+	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+		return false;
+
+	if (fence == signaler)
+		return true;
+
+	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+		if (wq->func != i915_sw_fence_wake)
+			continue;
+
+		if (__i915_sw_fence_check_if_after(wq->private, signaler))
+			return true;
+	}
+
+	return false;
+}
+
+static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
+{
+	wait_queue_t *wq;
+
+	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+		return;
+
+	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+		if (wq->func != i915_sw_fence_wake)
+			continue;
+
+		__i915_sw_fence_clear_checked_bit(wq->private);
+	}
+}
+
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+				  const struct i915_sw_fence * const signaler)
+{
+	unsigned long flags;
+	bool err;
+
+	if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
+		return false;
+
+	spin_lock_irqsave(&i915_sw_fence_lock, flags);
+	err = __i915_sw_fence_check_if_after(fence, signaler);
+	__i915_sw_fence_clear_checked_bit(fence);
+	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);
+
+	return err;
+}
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+				 struct i915_sw_fence *signaler,
+				 wait_queue_t *wq)
+{
+	unsigned long flags;
+	int pending;
+
+	if (i915_sw_fence_done(signaler))
+		return 0;
+
+	/* The dependency graph must be acyclic. */
+	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
+		return -EINVAL;
+
+	INIT_LIST_HEAD(&wq->task_list);
+	wq->flags = 0;
+	wq->func = i915_sw_fence_wake;
+	wq->private = i915_sw_fence_get(fence);
+
+	i915_sw_fence_await(fence);
+
+	spin_lock_irqsave(&signaler->wait.lock, flags);
+	if (likely(!i915_sw_fence_done(signaler))) {
+		__add_wait_queue_tail(&signaler->wait, wq);
+		pending = 1;
+	} else {
+		i915_sw_fence_wake(wq, 0, 0, NULL);
+		pending = 0;
+	}
+	spin_unlock_irqrestore(&signaler->wait.lock, flags);
+
+	return pending;
+}
+
+struct dma_fence_cb {
+	struct fence_cb base;
+	struct i915_sw_fence *fence;
+	struct fence *dma;
+	struct timer_list timer;
+};
+
+static void timer_i915_sw_fence_wake(unsigned long data)
+{
+	struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+
+	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
+	       cb->dma->ops->get_driver_name(cb->dma),
+	       cb->dma->ops->get_timeline_name(cb->dma),
+	       cb->dma->seqno);
+	fence_put(cb->dma);
+	cb->dma = NULL;
+
+	i915_sw_fence_commit(cb->fence);
+	cb->timer.function = NULL;
+}
+
+static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+{
+	struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+	del_timer_sync(&cb->timer);
+	if (cb->timer.function)
+		i915_sw_fence_commit(cb->fence);
+	fence_put(cb->dma);
+
+	kfree(cb);
+}
+
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				  struct fence *dma,
+				  unsigned long timeout,
+				  gfp_t gfp)
+{
+	struct dma_fence_cb *cb;
+	int ret;
+
+	if (fence_is_signaled(dma))
+		return 0;
+
+	cb = kmalloc(sizeof(*cb), gfp);
+	if (!cb) {
+		if (!gfpflags_allow_blocking(gfp))
+			return -ENOMEM;
+
+		return fence_wait(dma, false);
+	}
+
+	cb->fence = i915_sw_fence_get(fence);
+	i915_sw_fence_await(fence);
+
+	cb->dma = NULL;
+	__setup_timer(&cb->timer,
+		      timer_i915_sw_fence_wake, (unsigned long)cb,
+		      TIMER_IRQSAFE);
+	if (timeout) {
+		cb->dma = fence_get(dma);
+		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
+	}
+
+	ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+	if (ret == 0) {
+		ret = 1;
+	} else {
+		dma_i915_sw_fence_wake(dma, &cb->base);
+		if (ret == -ENOENT) /* fence already signaled */
+			ret = 0;
+	}
+
+	return ret;
+}
+
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+				    struct reservation_object *resv,
+				    const struct fence_ops *exclude,
+				    bool write,
+				    unsigned long timeout,
+				    gfp_t gfp)
+{
+	struct fence *excl;
+	int ret = 0, pending;
+
+	if (write) {
+		struct fence **shared;
+		unsigned int count, i;
+
+		ret = reservation_object_get_fences_rcu(resv,
+							&excl, &count, &shared);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < count; i++) {
+			if (shared[i]->ops == exclude)
+				continue;
+
+			pending = i915_sw_fence_await_dma_fence(fence,
+								shared[i],
+								timeout,
+								gfp);
+			if (pending < 0) {
+				ret = pending;
+				break;
+			}
+
+			ret |= pending;
+		}
+
+		for (i = 0; i < count; i++)
+			fence_put(shared[i]);
+		kfree(shared);
+	} else {
+		excl = reservation_object_get_excl_rcu(resv);
+	}
+
+	if (ret >= 0 && excl && excl->ops != exclude) {
+		pending = i915_sw_fence_await_dma_fence(fence,
+							excl,
+							timeout,
+							gfp);
+		if (pending < 0)
+			ret = pending;
+		else
+			ret |= pending;
+	}
+
+	fence_put(excl);
+
+	return ret;
+}

+ 65 - 0
drivers/gpu/drm/i915/i915_sw_fence.h

@@ -0,0 +1,65 @@
+/*
+ * i915_sw_fence.h - library routines for N:M synchronisation points
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _I915_SW_FENCE_H_
+#define _I915_SW_FENCE_H_
+
+#include <linux/gfp.h>
+#include <linux/kref.h>
+#include <linux/notifier.h> /* for NOTIFY_DONE */
+#include <linux/wait.h>
+
+struct completion;
+struct fence;
+struct fence_ops;
+struct reservation_object;
+
+struct i915_sw_fence {
+	wait_queue_head_t wait;
+	unsigned long flags;
+	struct kref kref;
+	atomic_t pending;
+};
+
+#define I915_SW_FENCE_CHECKED_BIT	0 /* used internally for DAG checking */
+#define I915_SW_FENCE_PRIVATE_BIT	1 /* available for use by owner */
+#define I915_SW_FENCE_MASK		(~3)
+
+enum i915_sw_fence_notify {
+	FENCE_COMPLETE,
+	FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+				      enum i915_sw_fence_notify state);
+#define __i915_sw_fence_call __aligned(4)
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void i915_sw_fence_commit(struct i915_sw_fence *fence);
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+				 struct i915_sw_fence *after,
+				 wait_queue_t *wq);
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				  struct fence *dma,
+				  unsigned long timeout,
+				  gfp_t gfp);
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+				    struct reservation_object *resv,
+				    const struct fence_ops *exclude,
+				    bool write,
+				    unsigned long timeout,
+				    gfp_t gfp);
+
+static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
+{
+	return atomic_read(&fence->pending) < 0;
+}
+
+#endif /* _I915_SW_FENCE_H_ */
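
Taken together, the two new files implement a small N:M barrier: i915_sw_fence_init() leaves the fence with one pending count, each await_*() call adds a source, and i915_sw_fence_commit() drops the initial count so the fence signals once every source has completed. A minimal usage sketch under those semantics (demo_notify()/demo() are hypothetical; the notify callback must be 4-byte aligned, which is what __i915_sw_fence_call provides):

	static int __i915_sw_fence_call
	demo_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
	{
		if (state == FENCE_COMPLETE)
			pr_info("sw_fence completed\n");
		return NOTIFY_DONE;	/* let the library wake our waiters */
	}

	static void demo(void)
	{
		struct i915_sw_fence first, second;
		wait_queue_t wq;	/* storage only; filled in by the await */

		i915_sw_fence_init(&first, demo_notify);
		i915_sw_fence_init(&second, demo_notify);

		/* 'second' gains 'first' as a source (pending count bumped) */
		i915_sw_fence_await_sw_fence(&second, &first, &wq);

		i915_sw_fence_commit(&second);	/* armed, still pending on 'first' */
		i915_sw_fence_commit(&first);	/* completes 'first', then 'second' */
	}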

+ 74 - 97
drivers/gpu/drm/i915/i915_sysfs.c

@@ -32,13 +32,16 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-#define dev_to_drm_minor(d) dev_get_drvdata((d))
+static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+{
+	struct drm_minor *minor = dev_get_drvdata(kdev);
+	return to_i915(minor->dev);
+}
 
 #ifdef CONFIG_PM
-static u32 calc_residency(struct drm_device *dev,
+static u32 calc_residency(struct drm_i915_private *dev_priv,
 			  i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	u64 raw_time; /* 32b value may overflow during fixed point math */
 	u64 units = 128ULL, div = 100000ULL;
 	u32 ret;
@@ -49,13 +52,13 @@ static u32 calc_residency(struct drm_device *dev,
 	intel_runtime_pm_get(dev_priv);
 
 	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		units = 1;
 		div = dev_priv->czclk_freq;
 
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
-	} else if (IS_BROXTON(dev)) {
+	} else if (IS_BROXTON(dev_priv)) {
 		units = 1;
 		div = 1200;		/* 833.33ns */
 	}
@@ -76,32 +79,32 @@ show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_get_drvdata(kdev);
-	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
-	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
-	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
 static ssize_t
 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_get_drvdata(kdev);
-	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
 
@@ -144,9 +147,9 @@ static struct attribute_group media_rc6_attr_group = {
 };
 #endif
 
-static int l3_access_valid(struct drm_device *dev, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
 {
-	if (!HAS_L3_DPF(dev))
+	if (!HAS_L3_DPF(dev_priv))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -163,22 +166,21 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	     struct bin_attribute *attr, char *buf,
 	     loff_t offset, size_t count)
 {
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_minor *dminor = dev_to_drm_minor(dev);
-	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct device *kdev = kobj_to_dev(kobj);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	count = round_down(count, 4);
 
-	ret = l3_access_valid(drm_dev, offset);
+	ret = l3_access_valid(dev_priv, offset);
 	if (ret)
 		return ret;
 
 	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 
-	ret = i915_mutex_lock_interruptible(drm_dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
@@ -189,7 +191,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	else
 		memset(buf, 0, count);
 
-	mutex_unlock(&drm_dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return count;
 }
@@ -199,30 +201,29 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	      struct bin_attribute *attr, char *buf,
 	      loff_t offset, size_t count)
 {
-	struct device *dev = kobj_to_dev(kobj);
-	struct drm_minor *dminor = dev_to_drm_minor(dev);
-	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = to_i915(drm_dev);
+	struct device *kdev = kobj_to_dev(kobj);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_gem_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
-	if (!HAS_HW_CONTEXTS(drm_dev))
+	if (!HAS_HW_CONTEXTS(dev_priv))
 		return -ENXIO;
 
-	ret = l3_access_valid(drm_dev, offset);
+	ret = l3_access_valid(dev_priv, offset);
 	if (ret)
 		return ret;
 
-	ret = i915_mutex_lock_interruptible(drm_dev);
+	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
 	if (!dev_priv->l3_parity.remap_info[slice]) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
-			mutex_unlock(&drm_dev->struct_mutex);
+			mutex_unlock(&dev->struct_mutex);
 			return -ENOMEM;
 		}
 	}
@@ -240,7 +241,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
 		ctx->remap_slice |= (1<<slice);
 
-	mutex_unlock(&drm_dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 
 	return count;
 }
@@ -266,9 +267,7 @@ static struct bin_attribute dpf_attrs_1 = {
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	int ret;
 
 	intel_runtime_pm_get(dev_priv);
@@ -298,9 +297,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
@@ -309,8 +306,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_i915_private *dev_priv = to_i915(minor->dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
@@ -321,9 +317,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 				       struct device_attribute *attr,
 				       const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 	ssize_t ret;
 
@@ -346,9 +340,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
@@ -357,9 +349,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
@@ -370,9 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 	ssize_t ret;
 
@@ -418,9 +406,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
@@ -431,9 +417,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 	ssize_t ret;
 
@@ -490,9 +474,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
@@ -538,8 +520,8 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_error_state_file_priv error_priv;
 	struct drm_i915_error_state_buf error_str;
 	ssize_t ret_count = 0;
@@ -573,18 +555,10 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
 				 loff_t off, size_t count)
 {
 	struct device *kdev = kobj_to_dev(kobj);
-	struct drm_minor *minor = dev_to_drm_minor(kdev);
-	struct drm_device *dev = minor->dev;
-	int ret;
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
 	DRM_DEBUG_DRIVER("Resetting error state\n");
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	i915_destroy_error_state(dev);
-	mutex_unlock(&dev->struct_mutex);
+	i915_destroy_error_state(&dev_priv->drm);
 
 	return count;
 }
@@ -597,37 +571,38 @@ static struct bin_attribute error_state_attr = {
 	.write = error_state_write,
 };
 
-void i915_setup_sysfs(struct drm_device *dev)
+void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 {
+	struct device *kdev = dev_priv->drm.primary->kdev;
 	int ret;
 
 #ifdef CONFIG_PM
-	if (HAS_RC6(dev)) {
-		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+	if (HAS_RC6(dev_priv)) {
+		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
-	if (HAS_RC6p(dev)) {
-		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+	if (HAS_RC6p(dev_priv)) {
+		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6p_attr_group);
 		if (ret)
 			DRM_ERROR("RC6p residency sysfs setup failed\n");
 	}
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		ret = sysfs_merge_group(&kdev->kobj,
 					&media_rc6_attr_group);
 		if (ret)
 			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
 	}
 #endif
-	if (HAS_L3_DPF(dev)) {
-		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+	if (HAS_L3_DPF(dev_priv)) {
+		ret = device_create_bin_file(kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 
-		if (NUM_L3_SLICES(dev) > 1) {
-			ret = device_create_bin_file(dev->primary->kdev,
+		if (NUM_L3_SLICES(dev_priv) > 1) {
+			ret = device_create_bin_file(kdev,
 						     &dpf_attrs_1);
 			if (ret)
 				DRM_ERROR("l3 parity slice 1 setup failed\n");
@@ -635,30 +610,32 @@ void i915_setup_sysfs(struct drm_device *dev)
 	}
 
 	ret = 0;
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
-	else if (INTEL_INFO(dev)->gen >= 6)
-		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
+	else if (INTEL_GEN(dev_priv) >= 6)
+		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
 	if (ret)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
-	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+	ret = sysfs_create_bin_file(&kdev->kobj,
 				    &error_state_attr);
 	if (ret)
 		DRM_ERROR("error_state sysfs setup failed\n");
 }
 
-void i915_teardown_sysfs(struct drm_device *dev)
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 {
-	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
-	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+	struct device *kdev = dev_priv->drm.primary->kdev;
+
+	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		sysfs_remove_files(&kdev->kobj, vlv_attrs);
 	else
-		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
-	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
-	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
+		sysfs_remove_files(&kdev->kobj, gen6_attrs);
+	device_remove_bin_file(kdev,  &dpf_attrs_1);
+	device_remove_bin_file(kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
-	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
+	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
 #endif
 }

+ 0 - 3
drivers/gpu/drm/i915/i915_vgpu.c

@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	if (!IS_HASWELL(dev_priv))
-		return;
-
 	magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
 	if (magic != VGT_MAGIC)
 		return;

+ 26 - 31
drivers/gpu/drm/i915/intel_audio.c

@@ -359,9 +359,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
-	enum port port = intel_dig_port->port;
+	enum port port = enc_to_dig_port(&encoder->base)->port;
 	enum pipe pipe = intel_crtc->pipe;
 	uint32_t tmp, eldv;
 	i915_reg_t aud_config, aud_cntrl_st2;
@@ -407,13 +405,10 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(&encoder->base);
-	enum port port = intel_dig_port->port;
+	enum port port = enc_to_dig_port(&encoder->base)->port;
 	enum pipe pipe = intel_crtc->pipe;
 	uint8_t *eld = connector->eld;
-	uint32_t eldv;
-	uint32_t tmp;
+	uint32_t tmp, eldv;
 	int len, i;
 	i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
@@ -581,26 +576,26 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void i915_audio_component_get_power(struct device *dev)
+static void i915_audio_component_get_power(struct device *kdev)
 {
-	intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+	intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_put_power(struct device *dev)
+static void i915_audio_component_put_power(struct device *kdev)
 {
-	intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+	intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
 }
 
-static void i915_audio_component_codec_wake_override(struct device *dev,
+static void i915_audio_component_codec_wake_override(struct device *kdev,
 						     bool enable)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;
 
-	i915_audio_component_get_power(dev);
+	i915_audio_component_get_power(kdev);
 
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
@@ -618,13 +613,13 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 		usleep_range(1000, 1500);
 	}
 
-	i915_audio_component_put_power(dev);
+	i915_audio_component_put_power(kdev);
 }
 
 /* Get CDCLK in kHz  */
-static int i915_audio_component_get_cdclk_freq(struct device *dev)
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 
 	if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
 		return -ENODEV;
@@ -632,10 +627,10 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
 	return dev_priv->cdclk_freq;
 }
 
-static int i915_audio_component_sync_audio_rate(struct device *dev,
+static int i915_audio_component_sync_audio_rate(struct device *kdev,
 						int port, int rate)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_crtc *crtc;
 	struct drm_display_mode *mode;
@@ -652,7 +647,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 	    !IS_HASWELL(dev_priv))
 		return 0;
 
-	i915_audio_component_get_power(dev);
+	i915_audio_component_get_power(kdev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];
@@ -703,15 +698,15 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
 	mutex_unlock(&dev_priv->av_mutex);
-	i915_audio_component_put_power(dev);
+	i915_audio_component_put_power(kdev);
 	return err;
 }
 
-static int i915_audio_component_get_eld(struct device *dev, int port,
+static int i915_audio_component_get_eld(struct device *kdev, int port,
 					bool *enabled,
 					unsigned char *buf, int max_bytes)
 {
-	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
 	const u8 *eld;
@@ -745,11 +740,11 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
 	.get_eld	= i915_audio_component_get_eld,
 };
 
-static int i915_audio_component_bind(struct device *i915_dev,
-				     struct device *hda_dev, void *data)
+static int i915_audio_component_bind(struct device *i915_kdev,
+				     struct device *hda_kdev, void *data)
 {
 	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 	int i;
 
 	if (WARN_ON(acomp->ops || acomp->dev))
@@ -757,7 +752,7 @@ static int i915_audio_component_bind(struct device *i915_dev,
 
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = &i915_audio_component_ops;
-	acomp->dev = i915_dev;
+	acomp->dev = i915_kdev;
 	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
 	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
 		acomp->aud_sample_rate[i] = 0;
@@ -767,11 +762,11 @@ static int i915_audio_component_bind(struct device *i915_dev,
 	return 0;
 }
 
-static void i915_audio_component_unbind(struct device *i915_dev,
-					struct device *hda_dev, void *data)
+static void i915_audio_component_unbind(struct device *i915_kdev,
+					struct device *hda_kdev, void *data)
 {
 	struct i915_audio_component *acomp = data;
-	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
 	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = NULL;

+ 3 - 0
drivers/gpu/drm/i915/intel_breadcrumbs.c

@@ -462,7 +462,10 @@ static int intel_breadcrumbs_signaler(void *arg)
 			 */
 			intel_engine_remove_wait(engine,
 						 &request->signaling.wait);
+
+			local_bh_disable();
 			fence_signal(&request->fence);
+			local_bh_enable(); /* kick start the tasklets */
 
 			/* Find the next oldest signal. Note that as we have
 			 * not been holding the lock, another client may
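
Signalling with bottom halves disabled batches any tasklets that the fence callbacks schedule and runs them as soon as local_bh_enable() is called, rather than leaving them for the next softirq opportunity. A sketch of the kind of callback that benefits, with hypothetical names (struct demo_engine stands in for whatever owns the tasklet):

	struct demo_engine {
		struct tasklet_struct irq_tasklet;
		struct fence_cb submit_cb;
	};

	static void demo_submit_notify(struct fence *fence, struct fence_cb *cb)
	{
		struct demo_engine *engine =
			container_of(cb, struct demo_engine, submit_cb);

		/* raised while bh are off; runs at the local_bh_enable() above */
		tasklet_hi_schedule(&engine->irq_tasklet);
	}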

+ 14 - 10
drivers/gpu/drm/i915/intel_color.c

@@ -100,13 +100,14 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int i, pipe = intel_crtc->pipe;
 	uint16_t coeffs[9] = { 0, };
+	struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
 
 	if (crtc_state->ctm) {
 		struct drm_color_ctm *ctm =
 			(struct drm_color_ctm *)crtc_state->ctm->data;
 		uint64_t input[9] = { 0, };
 
-		if (intel_crtc->config->limited_color_range) {
+		if (intel_crtc_state->limited_color_range) {
 			ctm_mult_by_limited(input, ctm->matrix);
 		} else {
 			for (i = 0; i < ARRAY_SIZE(input); i++)
@@ -158,7 +159,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 		 * into consideration.
 		 */
 		for (i = 0; i < 3; i++) {
-			if (intel_crtc->config->limited_color_range)
+			if (intel_crtc_state->limited_color_range)
 				coeffs[i * 3 + i] =
 					I9XX_CSC_COEFF_LIMITED_RANGE;
 			else
@@ -182,7 +183,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 	if (INTEL_INFO(dev)->gen > 6) {
 		uint16_t postoff = 0;
 
-		if (intel_crtc->config->limited_color_range)
+		if (intel_crtc_state->limited_color_range)
 			postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
 		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -193,7 +194,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 	} else {
 		uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
-		if (intel_crtc->config->limited_color_range)
+		if (intel_crtc_state->limited_color_range)
 			mode |= CSC_BLACK_SCREEN_OFFSET;
 
 		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -263,7 +264,8 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 
 /* Loads the legacy palette/gamma unit for the CRTC. */
 static void i9xx_load_luts_internal(struct drm_crtc *crtc,
-				    struct drm_property_blob *blob)
+				    struct drm_property_blob *blob,
+				    struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -272,7 +274,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 	int i;
 
 	if (HAS_GMCH_DISPLAY(dev)) {
-		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
+		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
 			assert_dsi_pll_enabled(dev_priv);
 		else
 			assert_pll_enabled(dev_priv, pipe);
@@ -305,7 +307,8 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
 
 static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
 {
-	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+	i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
+				to_intel_crtc_state(crtc_state));
 }
 
 /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
@@ -323,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
 	 * Workaround : Do not read or write the pipe palette/gamma data while
 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
 	 */
-	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+	if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
 	    (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
 		hsw_disable_ips(intel_crtc);
 		reenable_ips = true;
@@ -436,7 +439,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 		/* Turn off degamma/gamma on CGM block. */
 		I915_WRITE(CGM_PIPE_MODE(pipe),
 			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
-		i9xx_load_luts_internal(crtc, state->gamma_lut);
+		i9xx_load_luts_internal(crtc, state->gamma_lut,
+					to_intel_crtc_state(state));
 		return;
 	}
 
@@ -479,7 +483,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 	 * Also program a linear LUT in the legacy block (behind the
 	 * CGM block).
 	 */
-	i9xx_load_luts_internal(crtc, NULL);
+	i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
 }
 
 void intel_color_load_luts(struct drm_crtc_state *crtc_state)

+ 37 - 11
drivers/gpu/drm/i915/intel_crt.c

@@ -143,13 +143,15 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
-static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+static void intel_crt_set_dpms(struct intel_encoder *encoder,
+			       struct intel_crtc_state *crtc_state,
+			       int mode)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 	u32 adpa;
 
 	if (INTEL_INFO(dev)->gen >= 5)
@@ -193,23 +195,45 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
 	I915_WRITE(crt->adpa_reg, adpa);
 }
 
-static void intel_disable_crt(struct intel_encoder *encoder)
+static void intel_disable_crt(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
-	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+	intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
 }
 
-static void pch_disable_crt(struct intel_encoder *encoder)
+static void pch_disable_crt(struct intel_encoder *encoder,
+			    struct intel_crtc_state *old_crtc_state,
+			    struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_crt(struct intel_encoder *encoder)
+static void pch_post_disable_crt(struct intel_encoder *encoder,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct drm_connector_state *old_conn_state)
 {
-	intel_disable_crt(encoder);
+	intel_disable_crt(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_crt(struct intel_encoder *encoder)
+static void hsw_post_disable_crt(struct intel_encoder *encoder,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct drm_connector_state *old_conn_state)
 {
-	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+	pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+
+	lpt_disable_pch_transcoder(dev_priv);
+	lpt_disable_iclkip(dev_priv);
+
+	intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
+{
+	intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
 }
 
 static enum drm_mode_status
@@ -253,7 +277,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_crt_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 
@@ -894,6 +919,7 @@ void intel_crt_init(struct drm_device *dev)
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.post_disable = hsw_post_disable_crt;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;

+ 4 - 4
drivers/gpu/drm/i915/intel_csr.c

@@ -34,15 +34,15 @@
  * low-power state and comes back to normal.
  */
 
-#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
 
-#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
 MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 26)
 
-#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 

+ 185 - 91
drivers/gpu/drm/i915/intel_ddi.c

@@ -546,6 +546,27 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
 }
 
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+	switch (pll->id) {
+	case DPLL_ID_WRPLL1:
+		return PORT_CLK_SEL_WRPLL1;
+	case DPLL_ID_WRPLL2:
+		return PORT_CLK_SEL_WRPLL2;
+	case DPLL_ID_SPLL:
+		return PORT_CLK_SEL_SPLL;
+	case DPLL_ID_LCPLL_810:
+		return PORT_CLK_SEL_LCPLL_810;
+	case DPLL_ID_LCPLL_1350:
+		return PORT_CLK_SEL_LCPLL_1350;
+	case DPLL_ID_LCPLL_2700:
+		return PORT_CLK_SEL_LCPLL_2700;
+	default:
+		MISSING_CASE(pll->id);
+		return PORT_CLK_SEL_NONE;
+	}
+}
+
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
  * both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -561,7 +582,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	u32 temp, i, rx_ctl_val;
+	u32 temp, i, rx_ctl_val, ddi_pll_sel;
 
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
@@ -592,8 +613,9 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
 	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
 
 	/* Configure Port Clock Select */
-	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
-	WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
+	ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
+	I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+	WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
 	/* Start the training iterating through available voltages and emphasis,
 	 * testing each value twice. */
@@ -870,7 +892,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 	int link_clock = 0;
 	uint32_t dpll_ctl1, dpll;
 
-	dpll = pipe_config->ddi_pll_sel;
+	dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 
 	dpll_ctl1 = I915_READ(DPLL_CTRL1);
 
@@ -918,7 +940,7 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 	int link_clock = 0;
 	u32 val, pll;
 
-	val = pipe_config->ddi_pll_sel;
+	val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
 	switch (val & PORT_CLK_SEL_MASK) {
 	case PORT_CLK_SEL_LCPLL_810:
 		link_clock = 81000;
@@ -1586,13 +1608,15 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 }
 
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-			  const struct intel_crtc_state *pipe_config)
+			  struct intel_shared_dpll *pll)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_ddi_get_encoder_port(encoder);
 
+	if (WARN_ON(!pll))
+		return;
+
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		uint32_t dpll = pipe_config->ddi_pll_sel;
 		uint32_t val;
 
 		/* DDI -> PLL mapping  */
@@ -1600,70 +1624,91 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
 
 		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
 			DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
-		val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+		val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
 			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
 		I915_WRITE(DPLL_CTRL2, val);
 
 	} else if (INTEL_INFO(dev_priv)->gen < 9) {
-		WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
-		I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
+		I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
 	}
 }
 
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+				    int link_rate, uint32_t lane_count,
+				    struct intel_shared_dpll *pll,
+				    bool link_mst)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
-	int type = intel_encoder->type;
-
-	if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
-		intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-	}
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = intel_ddi_get_encoder_port(encoder);
 
-	if (type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	intel_dp_set_link_params(intel_dp, link_rate, lane_count,
+				 link_mst);
+	if (encoder->type == INTEL_OUTPUT_EDP)
 		intel_edp_panel_on(intel_dp);
-	}
-
-	intel_ddi_clk_select(intel_encoder, crtc->config);
 
-	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		intel_prepare_dp_ddi_buffers(intel_encoder);
-
-		intel_dp_set_link_params(intel_dp, crtc->config);
+	intel_ddi_clk_select(encoder, pll);
+	intel_prepare_dp_ddi_buffers(encoder);
+	intel_ddi_init_dp_buf_reg(encoder);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_start_link_train(intel_dp);
+	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+		intel_dp_stop_link_train(intel_dp);
+}
 
-		intel_ddi_init_dp_buf_reg(intel_encoder);
+static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+				      bool has_hdmi_sink,
+				      struct drm_display_mode *adjusted_mode,
+				      struct intel_shared_dpll *pll)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_encoder *drm_encoder = &encoder->base;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	int level = intel_ddi_hdmi_level(dev_priv, port);
 
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
-		intel_dp_start_link_train(intel_dp);
-		if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
-			intel_dp_stop_link_train(intel_dp);
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-		int level = intel_ddi_hdmi_level(dev_priv, port);
+	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+	intel_ddi_clk_select(encoder, pll);
+	intel_prepare_hdmi_ddi_buffers(encoder);
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_ddi_set_iboost(encoder, level);
+	else if (IS_BROXTON(dev_priv))
+		bxt_ddi_vswing_sequence(dev_priv, level, port,
+					INTEL_OUTPUT_HDMI);
 
-		intel_prepare_hdmi_ddi_buffers(intel_encoder);
+	intel_hdmi->set_infoframes(drm_encoder,
+				   has_hdmi_sink,
+				   adjusted_mode);
+}
 
-		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-			skl_ddi_set_iboost(intel_encoder, level);
-		else if (IS_BROXTON(dev_priv))
-			bxt_ddi_vswing_sequence(dev_priv, level, port,
-						INTEL_OUTPUT_HDMI);
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	int type = intel_encoder->type;
 
-		intel_hdmi->set_infoframes(encoder,
-					   crtc->config->has_hdmi_sink,
-					   &crtc->config->base.adjusted_mode);
+	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
+		intel_ddi_pre_enable_dp(intel_encoder,
+					crtc->config->port_clock,
+					crtc->config->lane_count,
+					crtc->config->shared_dpll,
+					intel_crtc_has_type(crtc->config,
+							    INTEL_OUTPUT_DP_MST));
+	}
+	if (type == INTEL_OUTPUT_HDMI) {
+		intel_ddi_pre_enable_hdmi(intel_encoder,
+					  crtc->config->has_hdmi_sink,
+					  &crtc->config->base.adjusted_mode,
+					  crtc->config->shared_dpll);
 	}
 }
 
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
+				   struct intel_crtc_state *old_crtc_state,
+				   struct drm_connector_state *old_conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
@@ -1673,6 +1718,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	uint32_t val;
 	bool wait = false;
 
+	/* old_crtc_state and old_conn_state are NULL when called from DP_MST */
+
 	val = I915_READ(DDI_BUF_CTL(port));
 	if (val & DDI_BUF_CTL_ENABLE) {
 		val &= ~DDI_BUF_CTL_ENABLE;
@@ -1708,7 +1755,42 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+	uint32_t val;
+
+	/*
+	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+	 * step 13 is the correct place for it. Step 18 is where it was
+	 * originally before the BUN.
+	 */
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_RX_ENABLE;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+	intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+
+	val = I915_READ(FDI_RX_MISC(PIPE_A));
+	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+	I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_PCDCLK;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val &= ~FDI_RX_PLL_ENABLE;
+	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+}
+
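intel_ddi_fdi_post_disable() repeats one read-modify-write shape four times over FDI_RX_CTL/FDI_RX_MISC. Factored out purely for illustration (a sketch, not a helper proposed by this series):

	static void fdi_rmw(struct drm_i915_private *dev_priv, i915_reg_t reg,
			    uint32_t clear, uint32_t set)
	{
		uint32_t val = I915_READ(reg);	/* I915_READ/WRITE expand against dev_priv */

		val &= ~clear;
		val |= set;
		I915_WRITE(reg, val);
	}

With that, the first step above would read fdi_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0).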
+static void intel_enable_ddi(struct intel_encoder *intel_encoder,
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
@@ -1737,7 +1819,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 
 		intel_edp_backlight_on(intel_dp);
 		intel_psr_enable(intel_dp);
-		intel_edp_drrs_enable(intel_dp);
+		intel_edp_drrs_enable(intel_dp, pipe_config);
 	}
 
 	if (intel_crtc->config->has_audio) {
@@ -1746,7 +1828,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+static void intel_disable_ddi(struct intel_encoder *intel_encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
@@ -1763,7 +1847,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		intel_edp_drrs_disable(intel_dp);
+		intel_edp_drrs_disable(intel_dp, old_crtc_state);
 		intel_psr_disable(intel_dp);
 		intel_edp_backlight_off(intel_dp);
 	}
@@ -2052,7 +2136,9 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
 	}
 }
 
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2141,38 +2227,6 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 	udelay(600);
 }
 
-void intel_ddi_fdi_disable(struct drm_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	uint32_t val;
-
-	/*
-	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
-	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
-	 * step 13 is the correct place for it. Step 18 is where it was
-	 * originally before the BUN.
-	 */
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
-	val &= ~FDI_RX_ENABLE;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-	intel_ddi_post_disable(intel_encoder);
-
-	val = I915_READ(FDI_RX_MISC(PIPE_A));
-	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
-	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
-	I915_WRITE(FDI_RX_MISC(PIPE_A), val);
-
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
-	val &= ~FDI_PCDCLK;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
-	val &= ~FDI_RX_PLL_ENABLE;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
@@ -2272,7 +2326,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 }
 
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int type = encoder->type;
@@ -2285,9 +2340,9 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 		pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
 	if (type == INTEL_OUTPUT_HDMI)
-		ret = intel_hdmi_compute_config(encoder, pipe_config);
+		ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
 	else
-		ret = intel_dp_compute_config(encoder, pipe_config);
+		ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
 
 	if (IS_BROXTON(dev_priv) && ret)
 		pipe_config->lane_lat_optim_mask =
@@ -2338,6 +2393,45 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
 	return connector;
 }
 
+struct intel_shared_dpll *
+intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
+{
+	struct intel_connector *connector = intel_dp->attached_connector;
+	struct intel_encoder *encoder = connector->encoder;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_shared_dpll *pll = NULL;
+	struct intel_shared_dpll_config tmp_pll_config;
+	enum intel_dpll_id dpll_id;
+
+	if (IS_BROXTON(dev_priv)) {
+		dpll_id = (enum intel_dpll_id)dig_port->port;
+		/*
+		 * Select the required PLL. This works for platforms where
+		 * there is no shared DPLL.
+		 */
+		pll = &dev_priv->shared_dplls[dpll_id];
+		if (WARN_ON(pll->active_mask)) {
+			DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
+				  pll->active_mask);
+			return NULL;
+		}
+		tmp_pll_config = pll->config;
+		if (!bxt_ddi_dp_set_dpll_hw_state(clock,
+						  &pll->config.hw_state)) {
+			DRM_ERROR("Could not setup DPLL\n");
+			pll->config = tmp_pll_config;
+			return NULL;
+		}
+	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		pll = skl_find_link_pll(dev_priv, clock);
+	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		pll = hsw_ddi_dp_get_dpll(encoder, clock);
+	}
+	return pll;
+}
+
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
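intel_ddi_get_link_dpll() is the hook that upfront link training will need: given a DP sink and a target link clock, it returns a configured but not yet enabled PLL. A hypothetical caller, sketched only to show the intended flow; the wrapper and its name are assumptions, not code from this series, though every routine it calls appears in the hunks above:

	static bool example_upfront_link_train(struct intel_encoder *encoder,
					       struct intel_dp *intel_dp,
					       int link_rate, uint32_t lane_count)
	{
		struct intel_shared_dpll *pll;

		pll = intel_ddi_get_link_dpll(intel_dp, link_rate);
		if (!pll)
			return false;	/* no PLL could be configured for this clock */

		/* Route the port to the PLL, then train as in
		 * intel_ddi_pre_enable_dp() above (SST case). */
		intel_ddi_clk_select(encoder, pll);
		intel_dp_set_link_params(intel_dp, link_rate, lane_count, false);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
		return true;
	}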

drivers/gpu/drm/i915/intel_device_info.c (+70, -70)

@@ -46,71 +46,70 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
 
 static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
 	u32 fuse, eu_dis;
 
 	fuse = I915_READ(CHV_FUSE_GT);
 
-	info->slice_total = 1;
+	sseu->slice_mask = BIT(0);
 
 	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-		info->subslice_per_slice++;
+		sseu->subslice_mask |= BIT(0);
 		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
 				 CHV_FGT_EU_DIS_SS0_R1_MASK);
-		info->eu_total += 8 - hweight32(eu_dis);
+		sseu->eu_total += 8 - hweight32(eu_dis);
 	}
 
 	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-		info->subslice_per_slice++;
+		sseu->subslice_mask |= BIT(1);
 		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
 				 CHV_FGT_EU_DIS_SS1_R1_MASK);
-		info->eu_total += 8 - hweight32(eu_dis);
+		sseu->eu_total += 8 - hweight32(eu_dis);
 	}
 
-	info->subslice_total = info->subslice_per_slice;
 	/*
 	 * CHV expected to always have a uniform distribution of EU
 	 * across subslices.
 	*/
-	info->eu_per_subslice = info->subslice_total ?
-				info->eu_total / info->subslice_total :
+	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+				sseu->eu_total / sseu_subslice_total(sseu) :
 				0;
 	/*
 	 * CHV supports subslice power gating on devices with more than
 	 * one subslice, and supports EU power gating on devices with
 	 * more than one EU pair per subslice.
 	*/
-	info->has_slice_pg = 0;
-	info->has_subslice_pg = (info->subslice_total > 1);
-	info->has_eu_pg = (info->eu_per_subslice > 2);
+	sseu->has_slice_pg = 0;
+	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
 }
 
 static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct sseu_dev_info *sseu = &info->sseu;
 	int s_max = 3, ss_max = 4, eu_max = 8;
 	int s, ss;
-	u32 fuse2, s_enable, ss_disable, eu_disable;
+	u32 fuse2, eu_disable;
 	u8 eu_mask = 0xff;
 
 	fuse2 = I915_READ(GEN8_FUSE2);
-	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
 
-	info->slice_total = hweight32(s_enable);
 	/*
 	 * The subslice disable field is global, i.e. it applies
 	 * to each of the enabled slices.
 	*/
-	info->subslice_per_slice = ss_max - hweight32(ss_disable);
-	info->subslice_total = info->slice_total * info->subslice_per_slice;
+	sseu->subslice_mask = (1 << ss_max) - 1;
+	sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+				 GEN9_F2_SS_DIS_SHIFT);
 
 	/*
 	 * Iterate through enabled slices and subslices to
 	 * count the total enabled EU.
 	*/
 	for (s = 0; s < s_max; s++) {
-		if (!(s_enable & BIT(s)))
+		if (!(sseu->slice_mask & BIT(s)))
 			/* skip disabled slice */
 			continue;
 
@@ -118,7 +117,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 		for (ss = 0; ss < ss_max; ss++) {
 			int eu_per_ss;
 
-			if (ss_disable & BIT(ss))
+			if (!(sseu->subslice_mask & BIT(ss)))
 				/* skip disabled subslice */
 				continue;
 
@@ -131,9 +130,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 			 * subslices if they are unbalanced.
 			 */
 			if (eu_per_ss == 7)
-				info->subslice_7eu[s] |= BIT(ss);
+				sseu->subslice_7eu[s] |= BIT(ss);
 
-			info->eu_total += eu_per_ss;
+			sseu->eu_total += eu_per_ss;
 		}
 	}
 
@@ -144,9 +143,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * recovery. BXT is expected to be perfectly uniform in EU
 	 * distribution.
 	*/
-	info->eu_per_subslice = info->subslice_total ?
-				DIV_ROUND_UP(info->eu_total,
-					     info->subslice_total) : 0;
+	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     sseu_subslice_total(sseu)) : 0;
 	/*
 	 * SKL supports slice power gating on devices with more than
 	 * one slice, and supports EU power gating on devices with
@@ -155,15 +154,15 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * supports EU power gating on devices with more than one EU
 	 * pair per subslice.
 	*/
-	info->has_slice_pg =
+	sseu->has_slice_pg =
 		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-		info->slice_total > 1;
-	info->has_subslice_pg =
-		IS_BROXTON(dev_priv) && info->subslice_total > 1;
-	info->has_eu_pg = info->eu_per_subslice > 2;
+		hweight8(sseu->slice_mask) > 1;
+	sseu->has_subslice_pg =
+		IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
 	if (IS_BROXTON(dev_priv)) {
-#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & BIT(ss))
+#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask & BIT(ss)))
 		/*
 		 * There is a HW issue in 2x6 fused down parts that requires
 		 * Pooled EU to be enabled as a WA. The pool configuration
@@ -171,19 +170,18 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 		 * doesn't affect if the device has all 3 subslices enabled.
 		 */
 		/* WaEnablePooledEuFor2x6:bxt */
-		info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
-				       (info->subslice_per_slice == 2 &&
+		info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
+				       (hweight8(sseu->subslice_mask) == 2 &&
 					INTEL_REVID(dev_priv) < BXT_REVID_C0));
 
-		info->min_eu_in_pool = 0;
+		sseu->min_eu_in_pool = 0;
 		if (info->has_pooled_eu) {
-			if (IS_SS_DISABLED(ss_disable, 0) ||
-			    IS_SS_DISABLED(ss_disable, 2))
-				info->min_eu_in_pool = 3;
-			else if (IS_SS_DISABLED(ss_disable, 1))
-				info->min_eu_in_pool = 6;
+			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+				sseu->min_eu_in_pool = 3;
+			else if (IS_SS_DISABLED(1))
+				sseu->min_eu_in_pool = 6;
 			else
-				info->min_eu_in_pool = 9;
+				sseu->min_eu_in_pool = 9;
 		}
 #undef IS_SS_DISABLED
 	}
@@ -191,14 +189,20 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 
 static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 {
-	struct intel_device_info *info = mkwrite_device_info(dev_priv);
+	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
 	const int s_max = 3, ss_max = 3, eu_max = 8;
 	int s, ss;
-	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+	u32 fuse2, eu_disable[s_max];
 
 	fuse2 = I915_READ(GEN8_FUSE2);
-	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	sseu->subslice_mask = BIT(ss_max) - 1;
+	sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+				 GEN8_F2_SS_DIS_SHIFT);
 
 	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
 	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -208,28 +212,19 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
 			 (32 - GEN8_EU_DIS1_S2_SHIFT));
 
-	info->slice_total = hweight32(s_enable);
-
-	/*
-	 * The subslice disable field is global, i.e. it applies
-	 * to each of the enabled slices.
-	 */
-	info->subslice_per_slice = ss_max - hweight32(ss_disable);
-	info->subslice_total = info->slice_total * info->subslice_per_slice;
-
 	/*
 	 * Iterate through enabled slices and subslices to
 	 * count the total enabled EU.
 	 */
 	for (s = 0; s < s_max; s++) {
-		if (!(s_enable & (0x1 << s)))
+		if (!(sseu->slice_mask & BIT(s)))
 			/* skip disabled slice */
 			continue;
 
 		for (ss = 0; ss < ss_max; ss++) {
 			u32 n_disabled;
 
-			if (ss_disable & (0x1 << ss))
+			if (!(sseu->subslice_mask & BIT(ss)))
 				/* skip disabled subslice */
 				continue;
 
@@ -239,9 +234,9 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 			 * Record which subslices have 7 EUs.
 			 */
 			if (eu_max - n_disabled == 7)
-				info->subslice_7eu[s] |= 1 << ss;
+				sseu->subslice_7eu[s] |= 1 << ss;
 
-			info->eu_total += eu_max - n_disabled;
+			sseu->eu_total += eu_max - n_disabled;
 		}
 	}
 
@@ -250,16 +245,17 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * subslices with the exception that any one EU in any one subslice may
 	 * be fused off for die recovery.
 	 */
-	info->eu_per_subslice = info->subslice_total ?
-		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     sseu_subslice_total(sseu)) : 0;
 
 	/*
 	 * BDW supports slice power gating on devices with more than
 	 * one slice.
 	 */
-	info->has_slice_pg = (info->slice_total > 1);
-	info->has_subslice_pg = 0;
-	info->has_eu_pg = 0;
+	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+	sseu->has_subslice_pg = 0;
+	sseu->has_eu_pg = 0;
 }
 
 /*
@@ -374,15 +370,19 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		info->has_snoop = false;
 
-	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
+	DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
+	DRM_DEBUG_DRIVER("subslice total: %u\n",
+			 sseu_subslice_total(&info->sseu));
+	DRM_DEBUG_DRIVER("subslice mask: %04x\n", info->sseu.subslice_mask);
+	DRM_DEBUG_DRIVER("subslice per slice: %u\n",
+			 hweight8(info->sseu.subslice_mask));
+	DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
+	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
 	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-			 info->has_slice_pg ? "y" : "n");
+			 info->sseu.has_slice_pg ? "y" : "n");
 	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-			 info->has_subslice_pg ? "y" : "n");
+			 info->sseu.has_subslice_pg ? "y" : "n");
 	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-			 info->has_eu_pg ? "y" : "n");
+			 info->sseu.has_eu_pg ? "y" : "n");
 }
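The conversion above replaces stored totals (slice_total, subslice_total, subslice_per_slice) with fuse-derived bitmasks; every total is now a population count. The arithmetic in isolation, as a self-contained userspace sketch (hweight8() here stands in for the kernel helper, and sseu_subslice_total() is assumed to be slices times subslices-per-slice, which is how the hunks above use it):

	#include <stdio.h>
	#include <stdint.h>

	/* Userspace stand-in for the kernel's hweight8(). */
	static unsigned int hweight8(uint8_t mask)
	{
		return (unsigned int)__builtin_popcount(mask);
	}

	int main(void)
	{
		uint8_t slice_mask = 0x1;	/* one slice: BIT(0) */
		uint8_t subslice_mask = 0x7;	/* subslices 0-2 enabled per slice */
		unsigned int eu_total = 23;	/* e.g. one EU fused off for die recovery */

		/* subslice_mask is global, i.e. it applies to each enabled slice. */
		unsigned int subslice_total =
			hweight8(slice_mask) * hweight8(subslice_mask);
		/* DIV_ROUND_UP(eu_total, subslice_total), open-coded. */
		unsigned int eu_per_subslice =
			(eu_total + subslice_total - 1) / subslice_total;

		printf("slices=%u subslices=%u eu/subslice=%u\n",
		       hweight8(slice_mask), subslice_total, eu_per_subslice);
		return 0;	/* prints: slices=1 subslices=3 eu/subslice=8 */
	}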

drivers/gpu/drm/i915/intel_display.c (+418, -199)

@@ -1907,7 +1907,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
@@ -3377,6 +3377,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
 	int pipe = intel_crtc->pipe;
 	u32 plane_ctl;
 	unsigned int rotation = plane_state->base.rotation;
@@ -3410,6 +3411,9 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
 	intel_crtc->adjusted_x = src_x;
 	intel_crtc->adjusted_y = src_y;
 
+	if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
+		skl_write_plane_wm(intel_crtc, wm, 0);
+
 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
 	I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
@@ -3441,7 +3445,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int pipe = to_intel_crtc(crtc)->pipe;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * We only populate skl_results on watermark updates, and if the
+	 * plane's visibility isn't actually changing, neither are its watermarks.
+	 */
+	if (!crtc->primary->state->visible)
+		skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
 
 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -3634,15 +3646,26 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
+static bool abort_flip_on_reset(struct intel_crtc *crtc)
+{
+	struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
+
+	if (i915_reset_in_progress(error))
+		return true;
+
+	if (crtc->reset_count != i915_reset_count(error))
+		return true;
+
+	return false;
+}
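abort_flip_on_reset() centralizes the two ways a queued flip can be orphaned by a GPU reset: a reset is in flight right now, or one completed since the flip was queued (the CRTC snapshots i915_reset_count() at queue time; see the intel_crtc_page_flip() hunk below). The pattern in the abstract, as an illustrative sketch rather than i915 API:

	struct flip_work_example {
		unsigned int reset_count_snapshot;	/* sampled when the flip was queued */
	};

	static bool obsoleted_by_reset(const struct flip_work_example *work,
				       unsigned int current_reset_count,
				       bool reset_in_progress)
	{
		if (reset_in_progress)
			return true;	/* outcome is unknowable mid-reset */
		return work->reset_count_snapshot != current_reset_count;
	}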
+
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	unsigned reset_counter;
 	bool pending;
 
-	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
-	if (intel_crtc->reset_counter != reset_counter)
+	if (abort_flip_on_reset(intel_crtc))
 		return false;
 
 	spin_lock_irq(&dev->event_lock);
@@ -4285,7 +4308,7 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 	return 0;
 }
 
-static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 {
 	u32 temp;
 
@@ -5154,15 +5177,137 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
 	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+					  struct intel_crtc_state *crtc_state,
+					  struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct drm_connector_state *conn_state = conn->state;
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+	}
+}
+
+static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+				      struct intel_crtc_state *crtc_state,
+				      struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct drm_connector_state *conn_state = conn->state;
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder, crtc_state, conn_state);
+	}
+}
+
+static void intel_encoders_enable(struct drm_crtc *crtc,
+				  struct intel_crtc_state *crtc_state,
+				  struct drm_atomic_state *old_state)
 {
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct drm_connector_state *conn_state = conn->state;
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+
+		if (conn_state->crtc != crtc)
+			continue;
+
+		encoder->enable(encoder, crtc_state, conn_state);
+		intel_opregion_notify_encoder(encoder, true);
+	}
+}
+
+static void intel_encoders_disable(struct drm_crtc *crtc,
+				   struct intel_crtc_state *old_crtc_state,
+				   struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		intel_opregion_notify_encoder(encoder, false);
+		encoder->disable(encoder, old_crtc_state, old_conn_state);
+	}
+}
+
+static void intel_encoders_post_disable(struct drm_crtc *crtc,
+					struct intel_crtc_state *old_crtc_state,
+					struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->post_disable)
+			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+	}
+}
+
+static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+					    struct intel_crtc_state *old_crtc_state,
+					    struct drm_atomic_state *old_state)
+{
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(old_conn_state->best_encoder);
+
+		if (old_conn_state->crtc != crtc)
+			continue;
+
+		if (encoder->post_pll_disable)
+			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+	}
+}
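The six intel_encoders_*() helpers above instantiate one loop per encoder hook: walk the connectors captured in the atomic state, skip those not driving this CRTC, and call a single stage callback with the state it needs (the enable-side variants read conn->state, the disable-side variants read old_conn_state). The shared skeleton, written once here for readability rather than as a proposed further refactor:

	for_each_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn->state->best_encoder);

		if (conn->state->crtc != crtc)
			continue;

		if (encoder->stage)	/* 'stage' = pre_pll_enable, pre_enable, ... */
			encoder->stage(encoder, crtc_state, conn->state);
	}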
+
+static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
+				 struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -5200,9 +5345,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	intel_crtc->active = true;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -5232,8 +5375,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->enable(encoder);
+	intel_encoders_enable(crtc, pipe_config, old_state);
 
 	if (HAS_PCH_CPT(dev))
 		cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -5251,16 +5393,15 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void haswell_crtc_enable(struct drm_crtc *crtc)
+static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
+				struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 
 	if (WARN_ON(intel_crtc->active))
 		return;
@@ -5269,9 +5410,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      false);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder);
+	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->shared_dpll)
 		intel_enable_shared_dpll(intel_crtc);
@@ -5309,10 +5448,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	else
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
-	}
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
@@ -5353,10 +5489,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		encoder->enable(encoder);
-		intel_opregion_notify_encoder(encoder, true);
-	}
+	intel_encoders_enable(crtc, pipe_config, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		intel_wait_for_vblank(dev, pipe);
@@ -5390,12 +5523,13 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 	}
 }
 
-static void ironlake_crtc_disable(struct drm_crtc *crtc)
+static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
+				  struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
 	/*
@@ -5408,8 +5542,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->disable(encoder);
+	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -5421,9 +5554,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	if (intel_crtc->config->has_pch_encoder)
 		ironlake_fdi_disable(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
+	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
 	if (intel_crtc->config->has_pch_encoder) {
 		ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -5453,22 +5584,20 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
 
-static void haswell_crtc_disable(struct drm_crtc *crtc)
+static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
+				 struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
 	if (intel_crtc->config->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      false);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		intel_opregion_notify_encoder(encoder, false);
-		encoder->disable(encoder);
-	}
+	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -5491,18 +5620,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_disable_pipe_clock(intel_crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
-
-	if (intel_crtc->config->has_pch_encoder) {
-		lpt_disable_pch_transcoder(dev_priv);
-		lpt_disable_iclkip(dev_priv);
-		intel_ddi_fdi_disable(crtc);
+	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
+	if (old_crtc_state->has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
 						      true);
-	}
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -6558,14 +6680,13 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
-static void valleyview_crtc_enable(struct drm_crtc *crtc)
+static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+				   struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 	int pipe = intel_crtc->pipe;
 
 	if (WARN_ON(intel_crtc->active))
@@ -6590,9 +6711,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder);
+	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
 
 	if (IS_CHERRYVIEW(dev)) {
 		chv_prepare_pll(intel_crtc, intel_crtc->config);
@@ -6602,9 +6721,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 		vlv_enable_pll(intel_crtc, intel_crtc->config);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	i9xx_pfit_enable(intel_crtc);
 
@@ -6616,8 +6733,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->enable(encoder);
+	intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -6629,14 +6745,13 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
 }
 
-static void i9xx_crtc_enable(struct drm_crtc *crtc)
+static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+			     struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = pipe_config->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
-	struct intel_crtc_state *pipe_config =
-		to_intel_crtc_state(crtc->state);
 	enum pipe pipe = intel_crtc->pipe;
 
 	if (WARN_ON(intel_crtc->active))
@@ -6657,9 +6772,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
+	intel_encoders_pre_enable(crtc, pipe_config, old_state);
 
 	i9xx_enable_pll(intel_crtc);
 
@@ -6673,8 +6786,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	assert_vblank_disabled(crtc);
 	drm_crtc_vblank_on(crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->enable(encoder);
+	intel_encoders_enable(crtc, pipe_config, old_state);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6692,12 +6804,13 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
 	I915_WRITE(PFIT_CONTROL, 0);
 }
 
-static void i9xx_crtc_disable(struct drm_crtc *crtc)
+static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
+			      struct drm_atomic_state *old_state)
 {
+	struct drm_crtc *crtc = old_crtc_state->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
 	/*
@@ -6707,8 +6820,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	if (IS_GEN2(dev))
 		intel_wait_for_vblank(dev, pipe);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		encoder->disable(encoder);
+	intel_encoders_disable(crtc, old_crtc_state, old_state);
 
 	drm_crtc_vblank_off(crtc);
 	assert_vblank_disabled(crtc);
@@ -6717,9 +6829,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
 	i9xx_pfit_disable(intel_crtc);
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_disable)
-			encoder->post_disable(encoder);
+	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
 	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
 		if (IS_CHERRYVIEW(dev))
@@ -6730,9 +6840,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 			i9xx_disable_pll(intel_crtc);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->post_pll_disable)
-			encoder->post_pll_disable(encoder);
+	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
 
 	if (!IS_GEN2(dev))
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6745,6 +6853,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum intel_display_power_domain domain;
 	unsigned long domains;
+	struct drm_atomic_state *state;
+	struct intel_crtc_state *crtc_state;
+	int ret;
 
 	if (!intel_crtc->active)
 		return;
@@ -6758,7 +6869,18 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 		to_intel_plane_state(crtc->primary->state)->base.visible = false;
 	}
 
-	dev_priv->display.crtc_disable(crtc);
+	state = drm_atomic_state_alloc(crtc->dev);
+	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+
+	/* Everything's already locked, -EDEADLK can't happen. */
+	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+	ret = drm_atomic_add_affected_connectors(state, crtc);
+
+	WARN_ON(IS_ERR(crtc_state) || ret);
+
+	dev_priv->display.crtc_disable(crtc_state, state);
+
+	drm_atomic_state_free(state);
 
 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
 		      crtc->base.id, crtc->name);
@@ -7273,9 +7395,10 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
 
 static int pnv_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	u16 gcfgc = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+	pci_read_config_word(pdev, GCFGC, &gcfgc);
 
 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
@@ -7297,9 +7420,10 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
 
 static int i915gm_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	u16 gcfgc = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+	pci_read_config_word(pdev, GCFGC, &gcfgc);
 
 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
 		return 133333;
@@ -7321,6 +7445,7 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
 
 static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	u16 hpllcc = 0;
 
 	/*
@@ -7328,10 +7453,10 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
 	 * encoding is different :(
 	 * FIXME is this the right way to detect 852GM/852GMV?
 	 */
-	if (dev->pdev->revision == 0x1)
+	if (pdev->revision == 0x1)
 		return 133333;
 
-	pci_bus_read_config_word(dev->pdev->bus,
+	pci_bus_read_config_word(pdev->bus,
 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
 
 	/* Assume that the hardware is in the high speed state.  This
@@ -7432,10 +7557,11 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
 
 static int gm45_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
 	uint16_t tmp = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = (tmp >> 12) & 0x1;
 
@@ -7454,6 +7580,7 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
 
 static int i965gm_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	static const uint8_t div_3200[] = { 16, 10,  8 };
 	static const uint8_t div_4000[] = { 20, 12, 10 };
 	static const uint8_t div_5333[] = { 24, 16, 14 };
@@ -7461,7 +7588,7 @@ static int i965gm_get_display_clock_speed(struct drm_device *dev)
 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
 	uint16_t tmp = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
 
@@ -7491,6 +7618,7 @@ fail:
 
 static int g33_get_display_clock_speed(struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -7499,7 +7627,7 @@ static int g33_get_display_clock_speed(struct drm_device *dev)
 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
 	uint16_t tmp = 0;
 
-	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = (tmp >> 4) & 0x7;
 
@@ -9665,7 +9793,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 	return;
 
 error:
-	kfree(fb);
+	kfree(intel_fb);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -9910,7 +10038,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
-			DRM_ERROR("Failed to write to D_COMP\n");
+			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
 		I915_WRITE(D_COMP_BDW, val);
@@ -10318,15 +10446,12 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 
 	switch (port) {
 	case PORT_A:
-		pipe_config->ddi_pll_sel = SKL_DPLL0;
 		id = DPLL_ID_SKL_DPLL0;
 		break;
 	case PORT_B:
-		pipe_config->ddi_pll_sel = SKL_DPLL1;
 		id = DPLL_ID_SKL_DPLL1;
 		break;
 	case PORT_C:
-		pipe_config->ddi_pll_sel = SKL_DPLL2;
 		id = DPLL_ID_SKL_DPLL2;
 		break;
 	default:
@@ -10345,25 +10470,10 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
 	u32 temp;
 
 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
-	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
+	id = temp >> (port * 3 + 1);
 
-	switch (pipe_config->ddi_pll_sel) {
-	case SKL_DPLL0:
-		id = DPLL_ID_SKL_DPLL0;
-		break;
-	case SKL_DPLL1:
-		id = DPLL_ID_SKL_DPLL1;
-		break;
-	case SKL_DPLL2:
-		id = DPLL_ID_SKL_DPLL2;
-		break;
-	case SKL_DPLL3:
-		id = DPLL_ID_SKL_DPLL3;
-		break;
-	default:
-		MISSING_CASE(pipe_config->ddi_pll_sel);
+	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
 		return;
-	}
 
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
@@ -10373,10 +10483,9 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 				struct intel_crtc_state *pipe_config)
 {
 	enum intel_dpll_id id;
+	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
-	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
-	switch (pipe_config->ddi_pll_sel) {
+	switch (ddi_pll_sel) {
 	case PORT_CLK_SEL_WRPLL1:
 		id = DPLL_ID_WRPLL1;
 		break;
@@ -10396,7 +10505,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 		id = DPLL_ID_LCPLL_2700;
 		break;
 	default:
-		MISSING_CASE(pipe_config->ddi_pll_sel);
+		MISSING_CASE(ddi_pll_sel);
 		/* fall through */
 	case PORT_CLK_SEL_NONE:
 		return;
@@ -10690,9 +10799,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
 	int pipe = intel_crtc->pipe;
 	uint32_t cntl = 0;
 
+	if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
+		skl_write_cursor_wm(intel_crtc, wm);
+
 	if (plane_state && plane_state->base.visible) {
 		cntl = MCURSOR_GAMMA_ENABLE;
 		switch (plane_state->base.crtc_w) {
@@ -11431,10 +11544,8 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned reset_counter;
 
-	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-	if (crtc->reset_counter != reset_counter)
+	if (abort_flip_on_reset(crtc))
 		return true;
 
 	/*
@@ -11911,8 +12022,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 
 	if (work->flip_queued_req)
 		WARN_ON(i915_wait_request(work->flip_queued_req,
-					  false, NULL,
-					  NO_WAITBOOST));
+					  0, NULL, NO_WAITBOOST));
 
 	/* For framebuffer backed by dmabuf, wait for fence */
 	resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -12094,17 +12204,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	crtc->primary->fb = fb;
 	update_state_fb(crtc->primary);
 
-	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
-			     to_intel_plane_state(primary->state));
-
 	work->pending_flip_obj = i915_gem_object_get(obj);
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto cleanup;
 
-	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
+	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
 		ret = -EIO;
 		goto cleanup;
 	}
@@ -12142,6 +12249,17 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->gtt_offset += intel_crtc->dspaddr_offset;
 	work->rotation = crtc->primary->state->rotation;
 
+	/*
+	 * There's the potential that the next frame will not be compatible with
+	 * FBC, so we want to call pre_update() before the actual page flip.
+	 * The problem is that pre_update() caches some information about the fb
+	 * object, so we want to do this only after the object is pinned. Let's
+	 * be on the safe side and do this immediately before scheduling the
+	 * flip.
+	 */
+	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+			     to_intel_plane_state(primary->state));
+
 	if (mmio_flip) {
 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
@@ -12155,7 +12273,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			goto cleanup_unpin;
 		}
 
-		ret = i915_gem_object_sync(obj, request);
+		ret = i915_gem_request_await_object(request, obj, false);
 		if (ret)
 			goto cleanup_request;
 
@@ -12668,10 +12786,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
 	if (IS_BROXTON(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+		DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
-			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ebb0,
 			      pipe_config->dpll_hw_state.ebb4,
 			      pipe_config->dpll_hw_state.pll0,
@@ -12684,15 +12801,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 			      pipe_config->dpll_hw_state.pll10,
 			      pipe_config->dpll_hw_state.pcsdw12);
 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+		DRM_DEBUG_KMS("dpll_hw_state: "
 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
-			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ctrl1,
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
-			      pipe_config->ddi_pll_sel,
+		DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->dpll_hw_state.wrpll,
 			      pipe_config->dpll_hw_state.spll);
 	} else {
@@ -12805,7 +12920,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	struct intel_crtc_scaler_state scaler_state;
 	struct intel_dpll_hw_state dpll_hw_state;
 	struct intel_shared_dpll *shared_dpll;
-	uint32_t ddi_pll_sel;
 	bool force_thru;
 
 	/* FIXME: before the switch to atomic started, a new pipe_config was
@@ -12817,7 +12931,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	scaler_state = crtc_state->scaler_state;
 	shared_dpll = crtc_state->shared_dpll;
 	dpll_hw_state = crtc_state->dpll_hw_state;
-	ddi_pll_sel = crtc_state->ddi_pll_sel;
 	force_thru = crtc_state->pch_pfit.force_thru;
 
 	memset(crtc_state, 0, sizeof *crtc_state);
@@ -12826,7 +12939,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	crtc_state->scaler_state = scaler_state;
 	crtc_state->shared_dpll = shared_dpll;
 	crtc_state->dpll_hw_state = dpll_hw_state;
-	crtc_state->ddi_pll_sel = ddi_pll_sel;
 	crtc_state->pch_pfit.force_thru = force_thru;
 }
 
@@ -12914,7 +13026,7 @@ encoder_retry:
 
 		encoder = to_intel_encoder(connector_state->best_encoder);
 
-		if (!(encoder->compute_config(encoder, pipe_config))) {
+		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
 			DRM_DEBUG_KMS("Encoder config failure\n");
 			goto fail;
 		}
@@ -13002,12 +13114,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
 	return false;
 }
 
-#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
-	list_for_each_entry((intel_crtc), \
-			    &(dev)->mode_config.crtc_list, \
-			    base.head) \
-		for_each_if (mask & (1 <<(intel_crtc)->pipe))
-
 static bool
 intel_compare_m_n(unsigned int m, unsigned int n,
 		  unsigned int m2, unsigned int n2,
@@ -13255,8 +13361,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 	PIPE_CONF_CHECK_I(double_wide);
 
-	PIPE_CONF_CHECK_X(ddi_pll_sel);
-
 	PIPE_CONF_CHECK_P(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -13338,16 +13442,23 @@ static void verify_wm_state(struct drm_crtc *crtc,
 			  hw_entry->start, hw_entry->end);
 	}
 
-	/* cursor */
-	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
-	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
-	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
-		DRM_ERROR("mismatch in DDB state pipe %c cursor "
-			  "(expected (%u,%u), found (%u,%u))\n",
-			  pipe_name(pipe),
-			  sw_entry->start, sw_entry->end,
-			  hw_entry->start, hw_entry->end);
+	/*
+	 * cursor
+	 * If the cursor plane isn't active, we may not have updated its ddb
+	 * allocation. In that case, since the ddb allocation will be updated
+	 * once the plane becomes visible, we can skip this check.
+	 */
+	if (intel_crtc->cursor_addr) {
+		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+		if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+			DRM_ERROR("mismatch in DDB state pipe %c cursor "
+				  "(expected (%u,%u), found (%u,%u))\n",
+				  pipe_name(pipe),
+				  sw_entry->start, sw_entry->end,
+				  hw_entry->start, hw_entry->end);
+		}
 	}
 }
 
@@ -13963,7 +14074,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 				continue;
 
 			ret = i915_wait_request(intel_plane_state->wait_req,
-						true, NULL, NULL);
+						I915_WAIT_INTERRUPTIBLE,
+						NULL, NULL);
 			if (ret) {
 				/* Any hang should be swallowed by the wait */
 				WARN_ON(ret == -EIO);
@@ -14053,6 +14165,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
 	return false;
 }
 
+static void intel_update_crtc(struct drm_crtc *crtc,
+			      struct drm_atomic_state *state,
+			      struct drm_crtc_state *old_crtc_state,
+			      unsigned int *crtc_vblank_mask)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
+	bool modeset = needs_modeset(crtc->state);
+
+	if (modeset) {
+		update_scanline_offset(intel_crtc);
+		dev_priv->display.crtc_enable(pipe_config, state);
+	} else {
+		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+	}
+
+	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
+		intel_fbc_enable(
+		    intel_crtc, pipe_config,
+		    to_intel_plane_state(crtc->primary->state));
+	}
+
+	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+
+	if (needs_vblank_wait(pipe_config))
+		*crtc_vblank_mask |= drm_crtc_mask(crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state,
+			       unsigned int *crtc_vblank_mask)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		if (!crtc->state->active)
+			continue;
+
+		intel_update_crtc(crtc, state, old_crtc_state,
+				  crtc_vblank_mask);
+	}
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state,
+			     unsigned int *crtc_vblank_mask)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+	unsigned int updated = 0;
+	bool progress;
+	enum pipe pipe;
+
+	/*
+	 * Whenever the number of active pipes changes, we need to make sure we
+	 * update the pipes in the right order so that their ddb allocations
+	 * never overlap with each other in between CRTC updates. Otherwise we'll
+	 * cause pipe underruns and other bad stuff.
+	 */
+	do {
+		int i;
+		progress = false;
+
+		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+			bool vbl_wait = false;
+			unsigned int cmask = drm_crtc_mask(crtc);
+			pipe = to_intel_crtc(crtc)->pipe;
+
+			if (updated & cmask || !crtc->state->active)
+				continue;
+			if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
+							pipe))
+				continue;
+
+			updated |= cmask;
+
+			/*
+			 * If this is an already active pipe, its DDB has changed,
+			 * and this isn't the last pipe that needs updating,
+			 * then we need to wait for a vblank to pass for the
+			 * new ddb allocation to take effect.
+			 */
+			if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+			    !crtc->state->active_changed &&
+			    intel_state->wm_results.dirty_pipes != updated)
+				vbl_wait = true;
+
+			intel_update_crtc(crtc, state, old_crtc_state,
+					  crtc_vblank_mask);
+
+			if (vbl_wait)
+				intel_wait_for_vblank(dev, pipe);
+
+			progress = true;
+		}
+	} while (progress);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -14076,7 +14293,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 			continue;
 
 		ret = i915_wait_request(intel_plane_state->wait_req,
-					true, NULL, NULL);
+					0, NULL, NULL);
 		/* EIO should be eaten, and we can't get interrupted in the
 		 * worker, and blocking commits have waited already. */
 		WARN_ON(ret);
@@ -14112,7 +14329,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 		if (old_crtc_state->active) {
 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
-			dev_priv->display.crtc_disable(crtc);
+			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
 			intel_crtc->active = false;
 			intel_fbc_disable(intel_crtc);
 			intel_disable_shared_dpll(intel_crtc);
@@ -14141,20 +14358,19 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
 			dev_priv->display.modeset_commit_cdclk(state);
 
+		/*
+		 * SKL workaround: bspec recommends we disable the SAGV when we
+	 * have more than one pipe enabled
+		 */
+		if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+			skl_disable_sagv(dev_priv);
+
 		intel_modeset_verify_disabled(dev);
 	}
 
-	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	/* Complete the events for pipes that have now been disabled */
 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		bool modeset = needs_modeset(crtc->state);
-		struct intel_crtc_state *pipe_config =
-			to_intel_crtc_state(crtc->state);
-
-		if (modeset && crtc->state->active) {
-			update_scanline_offset(to_intel_crtc(crtc));
-			dev_priv->display.crtc_enable(crtc);
-		}
 
 		/* Complete events for now disabled pipes here. */
 		if (modeset && !crtc->state->active && crtc->state->event) {
@@ -14164,21 +14380,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 			crtc->state->event = NULL;
 		}
-
-		if (!modeset)
-			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
-		if (crtc->state->active &&
-		    drm_atomic_get_existing_plane_state(state, crtc->primary))
-			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
-
-		if (crtc->state->active)
-			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
-		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
-			crtc_vblank_mask |= 1 << i;
 	}
 
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+
 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
 	 * already, but still need the state for the delayed optimization. To
 	 * fix this:
@@ -14214,6 +14420,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
 	}
 
+	if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+	    skl_can_enable_sagv(state))
+		skl_enable_sagv(dev_priv);
+
 	drm_atomic_helper_commit_hw_done(state);
 
 	if (intel_state->modeset)
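skl_can_enable_sagv() itself lands in intel_pm.c and is not shown in this diff. Its gating condition is the single-pipe rule from the workaround comment earlier in this function; a rough sketch, with the per-plane watermark latency checks elided (treat this as a summary, not the committed implementation):

    bool skl_can_enable_sagv(struct drm_atomic_state *state)
    {
    	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

    	/* bspec workaround: SAGV must stay off with more than one active pipe */
    	if (hweight32(intel_state->active_crtcs) > 1)
    		return false;

    	/* ... interlace and per-plane watermark latency checks elided ... */
    	return true;
    }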
@@ -14354,8 +14564,6 @@ out:
 		drm_atomic_state_free(state);
 }
 
-#undef for_each_intel_crtc_masked
-
 /*
  * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
  *        drm_atomic_helper_legacy_gamma_set() directly.
@@ -14601,10 +14809,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *old_intel_state =
 		to_intel_crtc_state(old_crtc_state);
 	bool modeset = needs_modeset(crtc->state);
+	enum pipe pipe = intel_crtc->pipe;
 
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(intel_crtc);
@@ -14619,8 +14829,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 
 	if (to_intel_crtc_state(crtc->state)->update_pipe)
 		intel_update_pipe_config(intel_crtc, old_intel_state);
-	else if (INTEL_INFO(dev)->gen >= 9)
+	else if (INTEL_GEN(dev_priv) >= 9) {
 		skl_detach_scalers(intel_crtc);
+
+		I915_WRITE(PIPE_WM_LINETIME(pipe),
+			   dev_priv->wm.skl_hw.wm_linetime[pipe]);
+	}
 }
 
 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -15782,6 +15996,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 			skl_modeset_calc_cdclk;
 	}
 
+	if (dev_priv->info.gen >= 9)
+		dev_priv->display.update_crtcs = skl_update_crtcs;
+	else
+		dev_priv->display.update_crtcs = intel_update_crtcs;
+
 	switch (INTEL_INFO(dev_priv)->gen) {
 	case 2:
 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -15983,15 +16202,16 @@ static void intel_init_quirks(struct drm_device *dev)
 static void i915_disable_vga(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u8 sr1;
 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
-	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 	outb(SR01, VGA_SR_INDEX);
 	sr1 = inb(VGA_SR_DATA);
 	outb(sr1 | 1<<5, VGA_SR_DATA);
-	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 	udelay(300);
 
 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
@@ -16273,15 +16493,15 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
 	return false;
 }
 
-static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_connector *connector;
 
 	for_each_connector_on_encoder(dev, &encoder->base, connector)
-		return true;
+		return connector;
 
-	return false;
+	return NULL;
 }
 
 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
@@ -16386,7 +16606,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
 	struct intel_connector *connector;
-	struct drm_device *dev = encoder->base.dev;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
@@ -16394,7 +16613,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 	bool has_active_crtc = encoder->base.crtc &&
 		to_intel_crtc(encoder->base.crtc)->active;
 
-	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
+	connector = intel_encoder_find_connector(encoder);
+	if (connector && !has_active_crtc) {
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
@@ -16403,12 +16623,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 		 * fallout from our resume register restoring. Disable
 		 * the encoder manually again. */
 		if (encoder->base.crtc) {
+			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+
 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
 				      encoder->base.base.id,
 				      encoder->base.name);
-			encoder->disable(encoder);
+			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 			if (encoder->post_disable)
-				encoder->post_disable(encoder);
+				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
 		}
 		encoder->base.crtc = NULL;
 
@@ -16416,12 +16638,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 		 * a bug in one of the get_hw_state functions. Or someplace else
 		 * in our code, like the register restore mess on resume. Clamp
 		 * things to off as a safer default. */
-		for_each_intel_connector(dev, connector) {
-			if (connector->encoder != encoder)
-				continue;
-			connector->base.dpms = DRM_MODE_DPMS_OFF;
-			connector->base.encoder = NULL;
-		}
+
+		connector->base.dpms = DRM_MODE_DPMS_OFF;
+		connector->base.encoder = NULL;
 	}
 	/* Enabled encoders without active connectors will be fixed in
 	 * the crtc fixup. */

+ 200 - 94
drivers/gpu/drm/i915/intel_dp.c

@@ -190,6 +190,29 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static int
+intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &intel_dig_port->base;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	int max_dotclk = dev_priv->max_dotclk_freq;
+	int ds_max_dotclk;
+
+	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+
+	if (type != DP_DS_PORT_TYPE_VGA)
+		return max_dotclk;
+
+	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
+						    intel_dp->downstream_ports);
+
+	if (ds_max_dotclk != 0)
+		max_dotclk = min(max_dotclk, ds_max_dotclk);
+
+	return max_dotclk;
+}
+
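drm_dp_downstream_max_clock() is one of the new drm_dp_helper.c helpers added by this series. It only reports a limit when the branch device exposes detailed capability info, returning 0 otherwise, which is why the caller above treats 0 as "no constraint". A sketch of that shape; the VGA conversion follows the DP spec's 8 MHz granularity for the detailed cap field, but take the exact factors from the helper itself:

    int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
    				    const u8 port_cap[4])
    {
    	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;

    	/* No detailed cap info -> no clock limit to report */
    	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] &
    	      DP_DETAILED_CAP_INFO_AVAILABLE))
    		return 0;

    	if (type == DP_DS_PORT_TYPE_VGA)
    		return port_cap[1] * 8 * 1000;	/* 8 MHz units -> kHz */

    	return 0;
    }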
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
@@ -199,7 +222,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int target_clock = mode->clock;
 	int max_rate, mode_rate, max_lanes, max_link_clock;
-	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+	int max_dotclk;
+
+	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
 	if (is_edp(intel_dp) && fixed_mode) {
 		if (mode->hdisplay > fixed_mode->hdisplay)
@@ -1243,7 +1268,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
 }
 
 static void
-intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+intel_dp_aux_init(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
@@ -1419,6 +1444,44 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
+static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+{
+	uint8_t rev;
+	int len;
+
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
+	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+	      DP_DWN_STRM_PORT_PRESENT))
+		return;
+
+	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
+	if (len < 0)
+		return;
+
+	DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+}
+
+static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+{
+	uint8_t rev[2];
+	int len;
+
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
+
+	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+	      DP_DWN_STRM_PORT_PRESENT))
+		return;
+
+	len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
+	if (len < 0)
+		return;
+
+	DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+}
+
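Worked example of the decoding above: a DP_BRANCH_HW_REV byte of 0x12 splits into high/low nibbles and logs as "sink hw revision: 1.2"; a DP_BRANCH_SW_REV pair of {0x02, 0x05} logs as "sink sw revision: 2.5".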
 static int rate_to_index(int find, const int *rates)
 {
 	int i = 0;
@@ -1461,9 +1524,24 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
 	}
 }
 
+static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config)
+{
+	int bpp, bpc;
+
+	bpp = pipe_config->pipe_bpp;
+	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+
+	if (bpc > 0)
+		bpp = min(bpp, 3*bpc);
+
+	return bpp;
+}
+
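Worked example: a 10 bpc pipe (pipe_bpp = 30) driving a DP-to-VGA dongle that reports at most 8 bpc comes out as min(30, 3 * 8) = 24, so link bandwidth is computed for 24 bpp rather than the pipe's native depth; a sink reporting 0 bpc (no detailed caps) leaves pipe_bpp untouched.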
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
-			struct intel_crtc_state *pipe_config)
+			struct intel_crtc_state *pipe_config,
+			struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1526,7 +1604,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
-	bpp = pipe_config->pipe_bpp;
+	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 	if (is_edp(intel_dp)) {
 
 		/* Get bpp from VBT only for panels that don't have bpp in EDID */
@@ -1640,23 +1718,28 @@ found:
 }
 
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-			      const struct intel_crtc_state *pipe_config)
+			      int link_rate, uint8_t lane_count,
+			      bool link_mst)
 {
-	intel_dp->link_rate = pipe_config->port_clock;
-	intel_dp->lane_count = pipe_config->lane_count;
-	intel_dp->link_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
+	intel_dp->link_rate = link_rate;
+	intel_dp->lane_count = lane_count;
+	intel_dp->link_mst = link_mst;
 }
 
-static void intel_dp_prepare(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 
-	intel_dp_set_link_params(intel_dp, crtc->config);
+	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
+				 pipe_config->lane_count,
+				 intel_crtc_has_type(pipe_config,
+						     INTEL_OUTPUT_DP_MST));
 
 	/*
 	 * There are four kinds of DP registers:
@@ -1682,7 +1765,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 
 	/* Handle DP bits in common between all three register formats */
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
+	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
 
 	/* Split out the IBX/CPU vs CPT settings */
 
@@ -1710,7 +1793,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
 	} else {
 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
+		    !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -2249,10 +2332,10 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
 
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
+				struct intel_crtc_state *pipe_config)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
 	assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2260,11 +2343,11 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 	assert_edp_pll_disabled(dev_priv);
 
 	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
-		      crtc->config->port_clock);
+		      pipe_config->port_clock);
 
 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
-	if (crtc->config->port_clock == 162000)
+	if (pipe_config->port_clock == 162000)
 		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
 	else
 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
@@ -2473,16 +2556,17 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	}
 }
 
-static void intel_disable_dp(struct intel_encoder *encoder)
+static void intel_disable_dp(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-	struct drm_device *dev = encoder->base.dev;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (crtc->config->has_audio)
+	if (old_crtc_state->has_audio)
 		intel_audio_codec_disable(encoder);
 
-	if (HAS_PSR(dev) && !HAS_DDI(dev))
+	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
 		intel_psr_disable(intel_dp);
 
 	/* Make sure the panel is off before trying to change the mode. But also
@@ -2493,11 +2577,13 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 	intel_edp_panel_off(intel_dp);
 
 	/* disable the port before the pipe on g4x */
-	if (INTEL_INFO(dev)->gen < 5)
+	if (INTEL_GEN(dev_priv) < 5)
 		intel_dp_link_down(intel_dp);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder)
+static void ilk_post_disable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
@@ -2509,14 +2595,18 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder)
 		ironlake_edp_pll_off(intel_dp);
 }
 
-static void vlv_post_disable_dp(struct intel_encoder *encoder)
+static void vlv_post_disable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
 	intel_dp_link_down(intel_dp);
 }
 
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_post_disable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -2542,6 +2632,10 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
 
+	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
+			      dp_train_pat & DP_TRAINING_PATTERN_MASK);
+
 	if (HAS_DDI(dev)) {
 		uint32_t temp = I915_READ(DP_TP_CTL(port));
 
@@ -2583,7 +2677,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		case DP_TRAINING_PATTERN_3:
-			DRM_ERROR("DP training pattern 3 not supported\n");
+			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		}
@@ -2608,7 +2702,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			if (IS_CHERRYVIEW(dev)) {
 				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
 			} else {
-				DRM_ERROR("DP training pattern 3 not supported\n");
+				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
 				*DP |= DP_LINK_TRAIN_PAT_2;
 			}
 			break;
@@ -2616,19 +2710,15 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 	}
 }
 
-static void intel_dp_enable_port(struct intel_dp *intel_dp)
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+				 struct intel_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc =
-		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
 	/* enable with pattern 1 (as per spec) */
-	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
-				 DP_TRAINING_PATTERN_1);
 
-	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
 
 	/*
 	 * Magic for VLV/CHV. We _must_ first set up the register
@@ -2637,14 +2727,15 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
 	 * fail when the power sequencer is freshly used for this port.
 	 */
 	intel_dp->DP |= DP_PORT_EN;
-	if (crtc->config->has_audio)
+	if (old_crtc_state->has_audio)
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
 	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
 	POSTING_READ(intel_dp->output_reg);
 }
 
-static void intel_enable_dp(struct intel_encoder *encoder)
+static void intel_enable_dp(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -2661,7 +2752,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
 		vlv_init_panel_power_sequencer(intel_dp);
 
-	intel_dp_enable_port(intel_dp);
+	intel_dp_enable_port(intel_dp, pipe_config);
 
 	edp_panel_vdd_on(intel_dp);
 	edp_panel_on(intel_dp);
@@ -2673,7 +2764,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 		unsigned int lane_mask = 0x0;
 
 		if (IS_CHERRYVIEW(dev))
-			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
 
 		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
 				    lane_mask);
@@ -2683,22 +2774,26 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	intel_dp_start_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
 
-	if (crtc->config->has_audio) {
+	if (pipe_config->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
 				 pipe_name(pipe));
 		intel_audio_codec_enable(encoder);
 	}
 }
 
-static void g4x_enable_dp(struct intel_encoder *encoder)
+static void g4x_enable_dp(struct intel_encoder *encoder,
+			  struct intel_crtc_state *pipe_config,
+			  struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
-	intel_enable_dp(encoder);
+	intel_enable_dp(encoder, pipe_config);
 	intel_edp_backlight_on(intel_dp);
 }
 
-static void vlv_enable_dp(struct intel_encoder *encoder)
+static void vlv_enable_dp(struct intel_encoder *encoder,
+			  struct intel_crtc_state *pipe_config,
+			  struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
@@ -2706,16 +2801,18 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
 	intel_psr_enable(intel_dp);
 }
 
-static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	intel_dp_prepare(encoder);
+	intel_dp_prepare(encoder, pipe_config);
 
 	/* Only ilk+ has port A */
 	if (port == PORT_A)
-		ironlake_edp_pll_on(intel_dp);
+		ironlake_edp_pll_on(intel_dp, pipe_config);
 }
 
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -2821,38 +2918,48 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	vlv_phy_pre_encoder_enable(encoder);
 
-	intel_enable_dp(encoder);
+	intel_enable_dp(encoder, pipe_config);
 }
 
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
-	intel_dp_prepare(encoder);
+	intel_dp_prepare(encoder, pipe_config);
 
 	vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_pre_enable_dp(struct intel_encoder *encoder)
+static void chv_pre_enable_dp(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	chv_phy_pre_encoder_enable(encoder);
 
-	intel_enable_dp(encoder);
+	intel_enable_dp(encoder, pipe_config);
 
 	/* Second common lane will stay alive on its own now */
 	chv_phy_release_cl2_override(encoder);
 }
 
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
-	intel_dp_prepare(encoder);
+	intel_dp_prepare(encoder, pipe_config);
 
 	chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	chv_phy_post_pll_disable(encoder);
 }
@@ -4171,7 +4278,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
  *
  * Return %true if @port is connected, %false otherwise.
  */
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
 					 struct intel_digital_port *port)
 {
 	if (HAS_PCH_IBX(dev_priv))
@@ -4282,6 +4389,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
 	intel_dp_probe_oui(intel_dp);
 
+	intel_dp_print_hw_revision(intel_dp);
+	intel_dp_print_sw_revision(intel_dp);
+
 	intel_dp_configure_mst(intel_dp);
 
 	if (intel_dp->is_mst) {
@@ -5022,7 +5132,8 @@ static void intel_dp_pps_init(struct drm_device *dev,
 
 /**
  * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev: DRM device
+ * @dev_priv: i915 device
+ * @crtc_state: a pointer to the active intel_crtc_state
  * @refresh_rate: RR to be programmed
  *
  * This function gets called when refresh rate (RR) has to be changed from
@@ -5032,14 +5143,14 @@ static void intel_dp_pps_init(struct drm_device *dev,
  *
  * The caller of this function needs to take a lock on dev_priv->drrs.
  */
-static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
+				    struct intel_crtc_state *crtc_state,
+				    int refresh_rate)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	struct intel_digital_port *dig_port = NULL;
 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
-	struct intel_crtc_state *config = NULL;
-	struct intel_crtc *intel_crtc = NULL;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
 	if (refresh_rate <= 0) {
@@ -5066,8 +5177,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 		return;
 	}
 
-	config = intel_crtc->config;
-
 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
 		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
 		return;
@@ -5083,12 +5192,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 		return;
 	}
 
-	if (!intel_crtc->active) {
+	if (!crtc_state->base.active) {
 		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
 		return;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
 		switch (index) {
 		case DRRS_HIGH_RR:
 			intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5100,18 +5209,18 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 		default:
 			DRM_ERROR("Unsupported refreshrate type\n");
 		}
-	} else if (INTEL_INFO(dev)->gen > 6) {
-		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+	} else if (INTEL_GEN(dev_priv) > 6) {
+		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
 		u32 val;
 
 		val = I915_READ(reg);
 		if (index > DRRS_HIGH_RR) {
-			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
 			else
 				val |= PIPECONF_EDP_RR_MODE_SWITCH;
 		} else {
-			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
 			else
 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
@@ -5127,18 +5236,17 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 /**
  * intel_edp_drrs_enable - init drrs struct if supported
  * @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
  *
  * Initializes frontbuffer_bits and drrs.dp
  */
-void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+			   struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (!intel_crtc->config->has_drrs) {
+	if (!crtc_state->has_drrs) {
 		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
 		return;
 	}
@@ -5160,17 +5268,16 @@ unlock:
 /**
  * intel_edp_drrs_disable - Disable DRRS
  * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
  *
  */
-void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+			    struct intel_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (!intel_crtc->config->has_drrs)
+	if (!old_crtc_state->has_drrs)
 		return;
 
 	mutex_lock(&dev_priv->drrs.mutex);
@@ -5180,9 +5287,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 	}
 
 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					intel_dp->attached_connector->panel.
-					fixed_mode->vrefresh);
+		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
 
 	dev_priv->drrs.dp = NULL;
 	mutex_unlock(&dev_priv->drrs.mutex);
@@ -5211,10 +5317,12 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
 	if (dev_priv->drrs.busy_frontbuffer_bits)
 		goto unlock;
 
-	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					intel_dp->attached_connector->panel.
-					downclock_mode->vrefresh);
+	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+
+		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+	}
 
 unlock:
 	mutex_unlock(&dev_priv->drrs.mutex);
@@ -5255,9 +5363,8 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
 
 	/* invalidate means busy screen hence upclock */
 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					dev_priv->drrs.dp->attached_connector->panel.
-					fixed_mode->vrefresh);
+		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
@@ -5299,9 +5406,8 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 
 	/* flush means busy screen hence upclock */
 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(&dev_priv->drm,
-					dev_priv->drrs.dp->attached_connector->panel.
-					fixed_mode->vrefresh);
+		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
 	/*
 	 * flush also means no more activity hence schedule downclock, if all
@@ -5598,7 +5704,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
-	intel_dp_aux_init(intel_dp, intel_connector);
+	intel_dp_aux_init(intel_dp);
 
 	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
 			  edp_panel_vdd_work);

+ 70 - 69
drivers/gpu/drm/i915/intel_dp_link_training.c

@@ -23,6 +23,15 @@
 
 #include "intel_drv.h"
 
+static void
+intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+		      link_status[0], link_status[1], link_status[2],
+		      link_status[3], link_status[4], link_status[5]);
+}
+
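The six bytes dumped here are the DPCD link-status block read from DP_LANE0_1_STATUS (0x202) onward: per-lane CR/EQ status for lanes 0-1 and 2-3, the lane-align and sink status bytes, and the two voltage-swing/pre-emphasis adjust-request bytes.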
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp,
 		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -103,13 +112,24 @@ intel_dp_update_link_train(struct intel_dp *intel_dp)
 	return ret == intel_dp->lane_count;
 }
 
+static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
+{
+	int lane;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++)
+		if ((intel_dp->train_set[lane] &
+		     DP_TRAIN_MAX_SWING_REACHED) == 0)
+			return false;
+
+	return true;
+}
+
 /* Enable corresponding port and start training pattern 1 */
-static void
+static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
-	int i;
 	uint8_t voltage;
-	int voltage_tries, loop_tries;
+	int voltage_tries, max_vswing_tries;
 	uint8_t link_config[2];
 	uint8_t link_bw, rate_select;
 
@@ -125,6 +145,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
 	if (intel_dp->num_sink_rates)
 		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
 				  &rate_select, 1);
@@ -140,60 +161,54 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 				       DP_TRAINING_PATTERN_1 |
 				       DP_LINK_SCRAMBLING_DISABLE)) {
 		DRM_ERROR("failed to enable link training\n");
-		return;
+		return false;
 	}
 
-	voltage = 0xff;
-	voltage_tries = 0;
-	loop_tries = 0;
+	voltage_tries = 1;
+	max_vswing_tries = 0;
 	for (;;) {
 		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
 			DRM_ERROR("failed to get link status\n");
-			break;
+			return false;
 		}
 
 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			DRM_DEBUG_KMS("clock recovery OK\n");
-			break;
+			return true;
 		}
 
-		/* Check to see if we've tried the max voltage */
-		for (i = 0; i < intel_dp->lane_count; i++)
-			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-				break;
-		if (i == intel_dp->lane_count) {
-			++loop_tries;
-			if (loop_tries == 5) {
-				DRM_ERROR("too many full retries, give up\n");
-				break;
-			}
-			intel_dp_reset_link_train(intel_dp,
-						  DP_TRAINING_PATTERN_1 |
-						  DP_LINK_SCRAMBLING_DISABLE);
-			voltage_tries = 0;
-			continue;
+		if (voltage_tries == 5) {
+			DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+			return false;
+		}
+
+		if (max_vswing_tries == 1) {
+			DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+			return false;
 		}
 
-		/* Check to see if we've tried the same voltage 5 times */
-		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-			++voltage_tries;
-			if (voltage_tries == 5) {
-				DRM_ERROR("too many voltage retries, give up\n");
-				break;
-			}
-		} else
-			voltage_tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Update training set as requested by target */
 		intel_get_adjust_train(intel_dp, link_status);
 		if (!intel_dp_update_link_train(intel_dp)) {
 			DRM_ERROR("failed to update link training\n");
-			break;
+			return false;
 		}
+
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+		    voltage)
+			++voltage_tries;
+		else
+			voltage_tries = 1;
+
+		if (intel_dp_link_max_vswing_reached(intel_dp))
+			++max_vswing_tries;
+
 	}
 }
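Net effect of the clock-recovery rewrite above: the loop now mirrors the DP-spec exit conditions directly. It succeeds as soon as drm_dp_clock_recovery_ok() passes, fails after five consecutive attempts at the same voltage swing, and fails after one further attempt once every lane reports DP_TRAIN_MAX_SWING_REACHED, replacing the old nested loop_tries/voltage_tries counters and full pattern-1 resets. Returning bool lets the caller fall back, e.g. by retrying the whole link at a lower rate.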
 
@@ -229,12 +244,12 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 	return training_pattern;
 }
 
-static void
+static bool
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
-	bool channel_eq = false;
-	int tries, cr_tries;
+	int tries;
 	u32 training_pattern;
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 	training_pattern = intel_dp_training_pattern(intel_dp);
 
@@ -243,19 +258,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 				     training_pattern |
 				     DP_LINK_SCRAMBLING_DISABLE)) {
 		DRM_ERROR("failed to start channel equalization\n");
-		return;
+		return false;
 	}
 
-	tries = 0;
-	cr_tries = 0;
-	channel_eq = false;
-	for (;;) {
-		uint8_t link_status[DP_LINK_STATUS_SIZE];
-
-		if (cr_tries > 5) {
-			DRM_ERROR("failed to train DP, aborting\n");
-			break;
-		}
+	intel_dp->channel_eq_status = false;
+	for (tries = 0; tries < 5; tries++) {
 
 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -266,44 +273,38 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 		/* Make sure clock is still ok */
 		if (!drm_dp_clock_recovery_ok(link_status,
 					      intel_dp->lane_count)) {
-			intel_dp_link_training_clock_recovery(intel_dp);
-			intel_dp_set_link_train(intel_dp,
-						training_pattern |
-						DP_LINK_SCRAMBLING_DISABLE);
-			cr_tries++;
-			continue;
+			intel_dp_dump_link_status(link_status);
+			DRM_DEBUG_KMS("Clock recovery check failed, cannot "
+				      "continue channel equalization\n");
+			break;
 		}
 
 		if (drm_dp_channel_eq_ok(link_status,
 					 intel_dp->lane_count)) {
-			channel_eq = true;
+			intel_dp->channel_eq_status = true;
+			DRM_DEBUG_KMS("Channel EQ done. DP Training "
+				      "successful\n");
 			break;
 		}
 
-		/* Try 5 times, then try clock recovery if that fails */
-		if (tries > 5) {
-			intel_dp_link_training_clock_recovery(intel_dp);
-			intel_dp_set_link_train(intel_dp,
-						training_pattern |
-						DP_LINK_SCRAMBLING_DISABLE);
-			tries = 0;
-			cr_tries++;
-			continue;
-		}
-
 		/* Update training set as requested by target */
 		intel_get_adjust_train(intel_dp, link_status);
 		if (!intel_dp_update_link_train(intel_dp)) {
 			DRM_ERROR("failed to update link training\n");
 			break;
 		}
-		++tries;
+	}
+
+	/* Tried 5 times without success: fail here so the caller can retry at a lower BW */
+	if (tries == 5) {
+		intel_dp_dump_link_status(link_status);
+		DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
 	}
 
 	intel_dp_set_idle_link_train(intel_dp);
 
-	if (channel_eq)
-		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+	return intel_dp->channel_eq_status;
 }
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)

+ 40 - 57
drivers/gpu/drm/i915/intel_dp_mst.c

@@ -31,18 +31,16 @@
 #include <drm/drm_edid.h>
 
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
-					struct intel_crtc_state *pipe_config)
+					struct intel_crtc_state *pipe_config,
+					struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_atomic_state *state;
-	int bpp, i;
+	int bpp;
 	int lane_count, slots;
 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct drm_connector *drm_connector;
-	struct intel_connector *connector, *found = NULL;
-	struct drm_connector_state *connector_state;
 	int mst_pbn;
 
 	pipe_config->dp_encoder_is_mst = true;
@@ -54,7 +52,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	 */
 	lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 
-
 	pipe_config->lane_count = lane_count;
 
 	pipe_config->pipe_bpp = 24;
@@ -62,20 +59,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 	state = pipe_config->base.state;
 
-	for_each_connector_in_state(state, drm_connector, connector_state, i) {
-		connector = to_intel_connector(drm_connector);
-
-		if (connector_state->best_encoder == &encoder->base) {
-			found = connector;
-			break;
-		}
-	}
-
-	if (!found) {
-		DRM_ERROR("can't find connector\n");
-		return false;
-	}
-
 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
 	pipe_config->pbn = mst_pbn;
@@ -92,16 +75,20 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 }
 
-static void intel_mst_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_disable_dp(struct intel_encoder *encoder,
+				 struct intel_crtc_state *old_crtc_state,
+				 struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_connector *connector =
+		to_intel_connector(old_conn_state->connector);
 	int ret;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
+	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
 
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 	if (ret) {
@@ -109,11 +96,15 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
 	}
 }
 
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+				      struct intel_crtc_state *old_crtc_state,
+				      struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_connector *connector =
+		to_intel_connector(old_conn_state->connector);
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
@@ -122,59 +113,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
 	/* and this can also fail */
 	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
+	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
 
 	intel_dp->active_mst_links--;
 
 	intel_mst->connector = NULL;
 	if (intel_dp->active_mst_links == 0) {
-		intel_dig_port->base.post_disable(&intel_dig_port->base);
+		intel_dig_port->base.post_disable(&intel_dig_port->base,
+						  NULL, NULL);
+
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 	}
 }
 
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_dig_port->port;
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
 	int ret;
 	uint32_t temp;
-	struct intel_connector *found = NULL, *connector;
 	int slots;
-	struct drm_crtc *crtc = encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	for_each_intel_connector(dev, connector) {
-		if (connector->base.state->best_encoder == &encoder->base) {
-			found = connector;
-			break;
-		}
-	}
-
-	if (!found) {
-		DRM_ERROR("can't find connector\n");
-		return;
-	}
 
 	/* MST encoders are bound to a crtc, not to a connector,
 	 * force the mapping here for get_hw_state.
 	 */
-	found->encoder = encoder;
+	connector->encoder = encoder;
+	intel_mst->connector = connector;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-	intel_mst->connector = found;
-
 	if (intel_dp->active_mst_links == 0) {
-		intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
+		intel_ddi_clk_select(&intel_dig_port->base,
+				     pipe_config->shared_dpll);
 
 		intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
-
-		intel_dp_set_link_params(intel_dp, intel_crtc->config);
+		intel_dp_set_link_params(intel_dp,
+					 pipe_config->port_clock,
+					 pipe_config->lane_count,
+					 true);
 
 		intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
 
@@ -185,8 +168,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
 	}
 
 	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
-				       intel_mst->connector->port,
-				       intel_crtc->config->pbn, &slots);
+				       connector->port,
+				       pipe_config->pbn, &slots);
 	if (ret == false) {
 		DRM_ERROR("failed to allocate vcpi\n");
 		return;
@@ -200,13 +183,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 }
 
-static void intel_mst_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_enable_dp(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_dig_port->port;
 	int ret;
 
@@ -239,9 +223,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 {
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	u32 temp, flags = 0;
 

+ 285 - 173
drivers/gpu/drm/i915/intel_dpll_mgr.c

@@ -23,6 +23,44 @@
 
 #include "intel_drv.h"
 
+struct intel_shared_dpll *
+skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
+{
+	struct intel_shared_dpll *pll = NULL;
+	struct intel_dpll_hw_state dpll_hw_state;
+	enum intel_dpll_id i;
+	bool found = false;
+
+	if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+		return pll;
+
+	for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
+		pll = &dev_priv->shared_dplls[i];
+
+		/* First pass: only consider DPLLs that are already in use */
+		if (pll->config.crtc_mask == 0)
+			continue;
+
+		if (memcmp(&dpll_hw_state, &pll->config.hw_state,
+			   sizeof(pll->config.hw_state)) == 0) {
+			found = true;
+			break;
+		}
+	}
+
+	/* OK, no matching timings; maybe there's a free one? */
+	for (i = DPLL_ID_SKL_DPLL1;
+	     ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
+		pll = &dev_priv->shared_dplls[i];
+		if (pll->config.crtc_mask == 0) {
+			pll->config.hw_state = dpll_hw_state;
+			break;
+		}
+	}
+
+	return pll;
+}
+
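skl_find_link_pll() is deliberately non-static, so it can be called from outside intel_dpll_mgr.c. A hypothetical caller, sketched with assumed surrounding names (dev_priv and intel_dp are not part of this hunk):

    /* Hypothetical use: pick a DPLL for a bare DP link ahead of the modeset */
    struct intel_shared_dpll *pll;

    pll = skl_find_link_pll(dev_priv, intel_dp->link_rate);
    if (!pll)
    	return false;	/* no DPLL1-3 matched and none was free */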
 struct intel_shared_dpll *
 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 			    enum intel_dpll_id id)
@@ -452,26 +490,6 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & SPLL_PLL_ENABLE;
 }
 
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
-{
-	switch (pll->id) {
-	case DPLL_ID_WRPLL1:
-		return PORT_CLK_SEL_WRPLL1;
-	case DPLL_ID_WRPLL2:
-		return PORT_CLK_SEL_WRPLL2;
-	case DPLL_ID_SPLL:
-		return PORT_CLK_SEL_SPLL;
-	case DPLL_ID_LCPLL_810:
-		return PORT_CLK_SEL_LCPLL_810;
-	case DPLL_ID_LCPLL_1350:
-		return PORT_CLK_SEL_LCPLL_1350;
-	case DPLL_ID_LCPLL_2700:
-		return PORT_CLK_SEL_LCPLL_2700;
-	default:
-		return PORT_CLK_SEL_NONE;
-	}
-}
-
 #define LC_FREQ 2700
 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
@@ -687,11 +705,65 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 	*r2_out = best.r2;
 }
 
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
+						       struct intel_crtc *crtc,
+						       struct intel_crtc_state *crtc_state)
+{
+	struct intel_shared_dpll *pll;
+	uint32_t val;
+	unsigned int p, n2, r2;
+
+	hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+	      WRPLL_DIVIDER_POST(p);
+
+	crtc_state->dpll_hw_state.wrpll = val;
+
+	pll = intel_find_shared_dpll(crtc, crtc_state,
+				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+	if (!pll)
+		return NULL;
+
+	return pll;
+}
+
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+					      int clock)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_shared_dpll *pll;
+	enum intel_dpll_id pll_id;
+
+	switch (clock / 2) {
+	case 81000:
+		pll_id = DPLL_ID_LCPLL_810;
+		break;
+	case 135000:
+		pll_id = DPLL_ID_LCPLL_1350;
+		break;
+	case 270000:
+		pll_id = DPLL_ID_LCPLL_2700;
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+		return NULL;
+	}
+
+	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+	if (!pll)
+		return NULL;
+
+	return pll;
+}
+
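Worked example: an HBR link has port_clock = 270000, so clock / 2 = 135000 selects DPLL_ID_LCPLL_1350 (the fixed 1.35 GHz LCPLL tap); RBR (162000) maps to LCPLL_810 the same way, and HBR2 (540000) to LCPLL_2700.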
 static struct intel_shared_dpll *
 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	     struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
 	int clock = crtc_state->port_clock;
 
@@ -699,41 +771,12 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	       sizeof(crtc_state->dpll_hw_state));
 
 	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		uint32_t val;
-		unsigned p, n2, r2;
-
-		hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
-		val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
-		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
-		      WRPLL_DIVIDER_POST(p);
-
-		crtc_state->dpll_hw_state.wrpll = val;
-
-		pll = intel_find_shared_dpll(crtc, crtc_state,
-					     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+		pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
 
 	} else if (encoder->type == INTEL_OUTPUT_DP ||
 		   encoder->type == INTEL_OUTPUT_DP_MST ||
 		   encoder->type == INTEL_OUTPUT_EDP) {
-		enum intel_dpll_id pll_id;
-
-		switch (clock / 2) {
-		case 81000:
-			pll_id = DPLL_ID_LCPLL_810;
-			break;
-		case 135000:
-			pll_id = DPLL_ID_LCPLL_1350;
-			break;
-		case 270000:
-			pll_id = DPLL_ID_LCPLL_2700;
-			break;
-		default:
-			DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
-			return NULL;
-		}
-
-		pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+		pll = hsw_ddi_dp_get_dpll(encoder, clock);
 
 	} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
@@ -751,14 +794,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	if (!pll)
 		return NULL;
 
-	crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
-
 	intel_reference_shared_dpll(pll, crtc_state);
 
 	return pll;
 }
 
-
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
 	.enable = hsw_ddi_wrpll_enable,
 	.disable = hsw_ddi_wrpll_disable,
@@ -1194,75 +1234,110 @@ skip_remaining_dividers:
 	return true;
 }
 
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
+				      struct intel_crtc_state *crtc_state,
+				      int clock)
 {
-	struct intel_shared_dpll *pll;
 	uint32_t ctrl1, cfgcr1, cfgcr2;
-	int clock = crtc_state->port_clock;
+	struct skl_wrpll_params wrpll_params = { 0, };
 
 	/*
 	 * See comment in intel_dpll_hw_state to understand why we always use 0
 	 * as the DPLL id in this function.
 	 */
-
 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
 
-	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		struct skl_wrpll_params wrpll_params = { 0, };
+	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+		return false;
 
-		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
-			return NULL;
+	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+		wrpll_params.dco_integer;
+
+	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+		wrpll_params.central_freq;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+	return true;
+}
 
-		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
-			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
-			 wrpll_params.dco_integer;
 
-		cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-			 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
-			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
-			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
-			 wrpll_params.central_freq;
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+				  struct intel_dpll_hw_state *dpll_hw_state)
+{
+	uint32_t ctrl1;
+
+	/*
+	 * See comment in intel_dpll_hw_state to understand why we always use 0
+	 * as the DPLL id in this function.
+	 */
+	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+	switch (clock / 2) {
+	case 81000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+		break;
+	case 135000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+		break;
+	case 270000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+		break;
+		/* eDP 1.4 rates */
+	case 162000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+		break;
+	case 108000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+		break;
+	case 216000:
+		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+		break;
+	}
+
+	dpll_hw_state->ctrl1 = ctrl1;
+	return true;
+}
+
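Worked example: the "eDP 1.4 rates" cases handle the intermediate link rates, so an eDP panel at port_clock = 216000 hits the 108000 case and sets DPLL_CTRL1_LINK_RATE_1080 in ctrl1; standard RBR/HBR/HBR2 links take the 81000/135000/270000 cases.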
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+	     struct intel_encoder *encoder)
+{
+	struct intel_shared_dpll *pll;
+	int clock = crtc_state->port_clock;
+	bool bret;
+	struct intel_dpll_hw_state dpll_hw_state;
+
+	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+	if (encoder->type == INTEL_OUTPUT_HDMI) {
+		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+		if (!bret) {
+			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+			return NULL;
+		}
 	} else if (encoder->type == INTEL_OUTPUT_DP ||
 		   encoder->type == INTEL_OUTPUT_DP_MST ||
 		   encoder->type == INTEL_OUTPUT_EDP) {
-		switch (crtc_state->port_clock / 2) {
-		case 81000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
-			break;
-		case 135000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
-			break;
-		case 270000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
-			break;
-		/* eDP 1.4 rates */
-		case 162000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
-			break;
-		case 108000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
-			break;
-		case 216000:
-			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
-			break;
+		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+		if (!bret) {
+			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+			return NULL;
 		}
-
-		cfgcr1 = cfgcr2 = 0;
+		crtc_state->dpll_hw_state = dpll_hw_state;
 	} else {
 		return NULL;
 	}
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
-	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
-	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
 	if (encoder->type == INTEL_OUTPUT_EDP)
 		pll = intel_find_shared_dpll(crtc, crtc_state,
 					     DPLL_ID_SKL_DPLL0,
@@ -1274,8 +1349,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	if (!pll)
 		return NULL;
 
-	crtc_state->ddi_pll_sel = pll->id;
-
 	intel_reference_shared_dpll(pll, crtc_state);
 
 	return pll;
@@ -1484,6 +1557,8 @@ struct bxt_clk_div {
 	uint32_t m2_frac;
 	bool m2_frac_en;
 	uint32_t n;
+
+	int vco;
 };
 
 /* pre-calculated values for DP linkrates */
@@ -1497,57 +1572,60 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
 	{432000, 3, 1, 32, 1677722, 1, 1}
 };
 
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
-	     struct intel_encoder *encoder)
+static bool
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
+			  struct intel_crtc_state *crtc_state, int clock,
+			  struct bxt_clk_div *clk_div)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-	struct intel_digital_port *intel_dig_port;
-	struct bxt_clk_div clk_div = {0};
-	int vco = 0;
-	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-	uint32_t lanestagger;
-	int clock = crtc_state->port_clock;
+	struct dpll best_clock;
 
-	if (encoder->type == INTEL_OUTPUT_HDMI) {
-		struct dpll best_clock;
+	/* Calculate HDMI div */
+	/*
+	 * FIXME: tie the following calculation into
+	 * i9xx_crtc_compute_clock
+	 */
+	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+				 clock, pipe_name(intel_crtc->pipe));
+		return false;
+	}
 
-		/* Calculate HDMI div */
-		/*
-		 * FIXME: tie the following calculation into
-		 * i9xx_crtc_compute_clock
-		 */
-		if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
-			DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-					 clock, pipe_name(crtc->pipe));
-			return NULL;
-		}
+	clk_div->p1 = best_clock.p1;
+	clk_div->p2 = best_clock.p2;
+	WARN_ON(best_clock.m1 != 2);
+	clk_div->n = best_clock.n;
+	clk_div->m2_int = best_clock.m2 >> 22;
+	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
+	clk_div->m2_frac_en = clk_div->m2_frac != 0;
 
-		clk_div.p1 = best_clock.p1;
-		clk_div.p2 = best_clock.p2;
-		WARN_ON(best_clock.m1 != 2);
-		clk_div.n = best_clock.n;
-		clk_div.m2_int = best_clock.m2 >> 22;
-		clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
-		clk_div.m2_frac_en = clk_div.m2_frac != 0;
+	clk_div->vco = best_clock.vco;
 
-		vco = best_clock.vco;
-	} else if (encoder->type == INTEL_OUTPUT_DP ||
-		   encoder->type == INTEL_OUTPUT_EDP) {
-		int i;
+	return true;
+}
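
The m2 divider split above treats best_clock.m2 as a fixed-point value with 22 fractional bits: the integer part is m2 >> 22 and the fraction is the low 22 bits. A standalone check using 32.4, the value the DP table below encodes as m2_int=32, m2_frac=1677722:

#include <stdio.h>

int main(void)
{
	/* 32.4 in x.22 fixed point: (32 << 22) | round(0.4 * 2^22) */
	unsigned int m2 = (32u << 22) | 1677722u;
	unsigned int m2_int  = m2 >> 22;
	unsigned int m2_frac = m2 & ((1u << 22) - 1);

	printf("m2_int=%u m2_frac=%u (~%.6f)\n",
	       m2_int, m2_frac, m2_frac / (double)(1 << 22));
	/* prints: m2_int=32 m2_frac=1677722 (~0.400000) */
	return 0;
}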
 
-		clk_div = bxt_dp_clk_val[0];
-		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
-			if (bxt_dp_clk_val[i].clock == clock) {
-				clk_div = bxt_dp_clk_val[i];
-				break;
-			}
+static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+{
+	int i;
+
+	*clk_div = bxt_dp_clk_val[0];
+	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+		if (bxt_dp_clk_val[i].clock == clock) {
+			*clk_div = bxt_dp_clk_val[i];
+			break;
 		}
-		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
 	}
 
+	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+}
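
Worked through for the 432000 kHz table entry ({clock, p1, p2, ...} = {432000, 3, 1, ...}): vco = 432000 * 10/2 * 3 * 1 = 6480000 kHz, which lands in the first (6.2-6.7 GHz) coefficient band checked below. Assuming BXT's 100 MHz reference and m1 = 2 (which the HDMI path WARNs on above), this also agrees with 2 * 100000 * 32.4. A one-liner to verify:

#include <stdio.h>

int main(void)
{
	int clock = 432000, p1 = 3, p2 = 1;	/* bxt_dp_clk_val entry */
	int vco = clock * 10 / 2 * p1 * p2;

	printf("vco = %d kHz\n", vco);	/* 6480000 -> 6.2-6.7 GHz band */
	return 0;
}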
+
+static bool bxt_ddi_set_dpll_hw_state(int clock,
+			  struct bxt_clk_div *clk_div,
+			  struct intel_dpll_hw_state *dpll_hw_state)
+{
+	int vco = clk_div->vco;
+	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+	uint32_t lanestagger;
+
 	if (vco >= 6200000 && vco <= 6700000) {
 		prop_coef = 4;
 		int_coef = 9;
@@ -1566,12 +1644,9 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 		targ_cnt = 9;
 	} else {
 		DRM_ERROR("Invalid VCO\n");
-		return NULL;
+		return false;
 	}
 
-	memset(&crtc_state->dpll_hw_state, 0,
-	       sizeof(crtc_state->dpll_hw_state));
-
 	if (clock > 270000)
 		lanestagger = 0x18;
 	else if (clock > 135000)
@@ -1583,35 +1658,75 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	else
 		lanestagger = 0x02;
 
-	crtc_state->dpll_hw_state.ebb0 =
-		PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
-	crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
-	crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
-	crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+	dpll_hw_state->pll0 = clk_div->m2_int;
+	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+	dpll_hw_state->pll2 = clk_div->m2_frac;
 
-	if (clk_div.m2_frac_en)
-		crtc_state->dpll_hw_state.pll3 =
-			PORT_PLL_M2_FRAC_ENABLE;
+	if (clk_div->m2_frac_en)
+		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
 
-	crtc_state->dpll_hw_state.pll6 =
-		prop_coef | PORT_PLL_INT_COEFF(int_coef);
-	crtc_state->dpll_hw_state.pll6 |=
-		PORT_PLL_GAIN_CTL(gain_ctl);
+	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
+	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
 
-	crtc_state->dpll_hw_state.pll8 = targ_cnt;
+	dpll_hw_state->pll8 = targ_cnt;
 
-	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-	crtc_state->dpll_hw_state.pll10 =
+	dpll_hw_state->pll10 =
 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
 		| PORT_PLL_DCO_AMP_OVR_EN_H;
 
-	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
-	crtc_state->dpll_hw_state.pcsdw12 =
-		LANESTAGGER_STRAP_OVRD | lanestagger;
+	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
 
-	intel_dig_port = enc_to_dig_port(&encoder->base);
+	return true;
+}
+
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+			  struct intel_dpll_hw_state *dpll_hw_state)
+{
+	struct bxt_clk_div clk_div = {0};
+
+	bxt_ddi_dp_pll_dividers(clock, &clk_div);
+
+	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc,
+		struct intel_crtc_state *crtc_state,
+		struct intel_encoder *encoder)
+{
+	struct bxt_clk_div clk_div = {0};
+	struct intel_dpll_hw_state dpll_hw_state = {0};
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_digital_port *intel_dig_port;
+	struct intel_shared_dpll *pll;
+	int i, clock = crtc_state->port_clock;
+
+	if (encoder->type == INTEL_OUTPUT_HDMI
+	    && !bxt_ddi_hdmi_pll_dividers(crtc, crtc_state,
+					  clock, &clk_div))
+		return NULL;
+
+	if ((encoder->type == INTEL_OUTPUT_DP ||
+	     encoder->type == INTEL_OUTPUT_EDP) &&
+	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+		return NULL;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	crtc_state->dpll_hw_state = dpll_hw_state;
+
+	if (encoder->type == INTEL_OUTPUT_DP_MST) {
+		struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+
+		intel_dig_port = intel_mst->primary;
+	} else
+		intel_dig_port = enc_to_dig_port(&encoder->base);
 
 	/* 1:1 mapping between ports and PLLs */
 	i = (enum intel_dpll_id) intel_dig_port->port;
@@ -1622,9 +1737,6 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 
 	intel_reference_shared_dpll(pll, crtc_state);
 
-	/* shared DPLL id 0 is DPLL A */
-	crtc_state->ddi_pll_sel = pll->id;
-
 	return pll;
 }
 

+ 15 - 0
drivers/gpu/drm/i915/intel_dpll_mgr.h

@@ -160,5 +160,20 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc);
 void intel_shared_dpll_commit(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
+/* BXT dpll related functions */
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+			  struct intel_dpll_hw_state *dpll_hw_state);
+
+
+/* SKL dpll related functions */
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+				  struct intel_dpll_hw_state *dpll_hw_state);
+struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
+					    int clock);
+
+
+/* HSW dpll related functions */
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+					      int clock);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
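
With these prototypes exported, a PLL state can be computed from a bare link clock outside intel_dpll_mgr.c. A hypothetical caller sketch under that assumption; example_bxt_dp_pll_state is made up here and is not part of this series:

/*
 * Hypothetical caller, not from this patch: uses only the helper
 * exported above plus memset.
 */
static bool example_bxt_dp_pll_state(int link_clock_khz,
				     struct intel_dpll_hw_state *state)
{
	memset(state, 0, sizeof(*state));
	return bxt_ddi_dp_set_dpll_hw_state(link_clock_khz, state);
}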

+ 65 - 31
drivers/gpu/drm/i915/intel_drv.h

@@ -52,11 +52,15 @@
  */
 #define _wait_for(COND, US, W) ({ \
 	unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;	\
-	int ret__ = 0;							\
-	while (!(COND)) {						\
-		if (time_after(jiffies, timeout__)) {			\
-			if (!(COND))					\
-				ret__ = -ETIMEDOUT;			\
+	int ret__;							\
+	for (;;) {							\
+		bool expired__ = time_after(jiffies, timeout__);	\
+		if (COND) {						\
+			ret__ = 0;					\
+			break;						\
+		}							\
+		if (expired__) {					\
+			ret__ = -ETIMEDOUT;				\
 			break;						\
 		}							\
 		if ((W) && drm_can_sleep()) {				\
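
The rewritten loop samples expiry before re-checking the condition, so a condition that becomes true just as the timeout elapses is still reported as success rather than -ETIMEDOUT. A plain userspace sketch of the same pattern; wait_for_flag is illustrative, not driver code:

#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int wait_for_flag(volatile bool *flag, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		bool expired = (now.tv_sec - start.tv_sec) * 1000L +
			       (now.tv_nsec - start.tv_nsec) / 1000000L >
			       timeout_ms;
		/* check the condition *after* sampling expiry */
		if (*flag)
			return 0;
		if (expired)
			return -ETIMEDOUT;
	}
}

int main(void)
{
	volatile bool flag = true;	/* already set: succeeds at once */

	printf("ret=%d\n", wait_for_flag(&flag, 10));
	return 0;
}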
@@ -205,14 +209,26 @@ struct intel_encoder {
 	unsigned int cloneable;
 	void (*hot_plug)(struct intel_encoder *);
 	bool (*compute_config)(struct intel_encoder *,
-			       struct intel_crtc_state *);
-	void (*pre_pll_enable)(struct intel_encoder *);
-	void (*pre_enable)(struct intel_encoder *);
-	void (*enable)(struct intel_encoder *);
-	void (*mode_set)(struct intel_encoder *intel_encoder);
-	void (*disable)(struct intel_encoder *);
-	void (*post_disable)(struct intel_encoder *);
-	void (*post_pll_disable)(struct intel_encoder *);
+			       struct intel_crtc_state *,
+			       struct drm_connector_state *);
+	void (*pre_pll_enable)(struct intel_encoder *,
+			       struct intel_crtc_state *,
+			       struct drm_connector_state *);
+	void (*pre_enable)(struct intel_encoder *,
+			   struct intel_crtc_state *,
+			   struct drm_connector_state *);
+	void (*enable)(struct intel_encoder *,
+		       struct intel_crtc_state *,
+		       struct drm_connector_state *);
+	void (*disable)(struct intel_encoder *,
+			struct intel_crtc_state *,
+			struct drm_connector_state *);
+	void (*post_disable)(struct intel_encoder *,
+			     struct intel_crtc_state *,
+			     struct drm_connector_state *);
+	void (*post_pll_disable)(struct intel_encoder *,
+				 struct intel_crtc_state *,
+				 struct drm_connector_state *);
 	/* Read out the current hw state of this connector, returning true if
 	 * the encoder is active. If the encoder is enabled it also sets the pipe
 	 * it is connected to in the pipe parameter. */
@@ -578,12 +594,6 @@ struct intel_crtc_state {
 	/* Selected dpll when shared or NULL. */
 	struct intel_shared_dpll *shared_dpll;
 
-	/*
-	 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
-	 * - enum skl_dpll on SKL
-	 */
-	uint32_t ddi_pll_sel;
-
 	/* Actual register state of the dpll, for shared dpll cross-checking. */
 	struct intel_dpll_hw_state dpll_hw_state;
 
@@ -700,8 +710,8 @@ struct intel_crtc {
 
 	struct intel_crtc_state *config;
 
-	/* reset counter value when the last flip was submitted */
-	unsigned int reset_counter;
+	/* global reset count when the last flip was submitted */
+	unsigned int reset_count;
 
 	/* Access to these should be protected by dev_priv->irq_lock. */
 	bool cpu_fifo_underrun_disabled;
@@ -872,6 +882,7 @@ struct intel_dp {
 	bool link_mst;
 	bool has_audio;
 	bool detect_done;
+	bool channel_eq_status;
 	enum hdmi_force_audio force_audio;
 	bool limited_color_range;
 	bool color_range_auto;
@@ -1124,7 +1135,10 @@ void intel_crt_reset(struct drm_encoder *encoder);
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
-			  const struct intel_crtc_state *pipe_config);
+			  struct intel_shared_dpll *pll);
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+				struct intel_crtc_state *old_crtc_state,
+				struct drm_connector_state *old_conn_state);
 void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -1140,7 +1154,6 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config);
 struct intel_encoder *
@@ -1151,7 +1164,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
 			 struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
-
+struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
+						  int clock);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
@@ -1171,6 +1185,8 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
 void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 		      const char *name, u32 reg, int ref_freq);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
 unsigned int intel_fb_xy_to_linear(int x, int y,
@@ -1367,7 +1383,8 @@ bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 			     struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
-			      const struct intel_crtc_state *pipe_config);
+			      int link_rate, uint8_t lane_count,
+			      bool link_mst);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1376,7 +1393,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
-			     struct intel_crtc_state *pipe_config);
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
 				  bool long_hpd);
@@ -1394,14 +1412,14 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+			   struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+			   struct intel_crtc_state *crtc_state);
 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
 			       unsigned int frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-				  struct intel_digital_port *port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1501,7 +1519,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-			       struct intel_crtc_state *pipe_config);
+			       struct intel_crtc_state *pipe_config,
+			       struct drm_connector_state *conn_state);
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
@@ -1722,6 +1741,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+			       const struct skl_ddb_allocation *new,
+			       enum pipe pipe);
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+				 const struct skl_ddb_allocation *old,
+				 const struct skl_ddb_allocation *new,
+				 enum pipe pipe);
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+			 const struct skl_wm_values *wm);
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+			const struct skl_wm_values *wm,
+			int plane);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);

+ 25 - 15
drivers/gpu/drm/i915/intel_dsi.c

@@ -312,7 +312,8 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 }
 
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -533,14 +534,15 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 	intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+			      struct intel_crtc_state *pipe_config);
 
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	enum port port;
 
 	DRM_DEBUG_KMS("\n");
@@ -550,9 +552,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	 * lock. It needs to be fully powered down to fix it.
 	 */
 	intel_disable_dsi_pll(encoder);
-	intel_enable_dsi_pll(encoder, crtc->config);
+	intel_enable_dsi_pll(encoder, pipe_config);
 
-	intel_dsi_prepare(encoder);
+	intel_dsi_prepare(encoder, pipe_config);
 
 	/* Panel Enable over CRC PMIC */
 	if (intel_dsi->gpio_panel)
@@ -582,7 +584,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	intel_dsi_enable(encoder);
 }
 
-static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+static void intel_dsi_enable_nop(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
 	DRM_DEBUG_KMS("\n");
 
@@ -592,7 +596,9 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
 	 */
 }
 
-static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+static void intel_dsi_pre_disable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	enum port port;
@@ -694,7 +700,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 	intel_disable_dsi_pll(encoder);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_post_disable(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -819,6 +827,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 	u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
 				crtc_hblank_start_sw, crtc_hblank_end_sw;
 
+	/* FIXME: hw readout should not depend on SW state */
 	intel_crtc = to_intel_crtc(encoder->base.crtc);
 	adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
 
@@ -1104,14 +1113,15 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
 	}
 }
 
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+			      struct intel_crtc_state *pipe_config)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	enum port port;
 	unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 	u32 val, tmp;
@@ -1348,7 +1358,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
 		intel_connector->panel.fitting_mode = val;
 	}
 
-	crtc = intel_attached_encoder(connector)->base.crtc;
+	crtc = connector->state->crtc;
 	if (crtc && crtc->state->enable) {
 		/*
 		 * If the CRTC is enabled, the display will be changed

+ 16 - 12
drivers/gpu/drm/i915/intel_dvo.c

@@ -174,7 +174,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 	pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
-static void intel_disable_dvo(struct intel_encoder *encoder)
+static void intel_disable_dvo(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -186,17 +188,18 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
 	I915_READ(dvo_reg);
 }
 
-static void intel_enable_dvo(struct intel_encoder *encoder)
+static void intel_enable_dvo(struct intel_encoder *encoder,
+			     struct intel_crtc_state *pipe_config,
+			     struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
 	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-					 &crtc->config->base.mode,
-					 &crtc->config->base.adjusted_mode);
+					 &pipe_config->base.mode,
+					 &pipe_config->base.adjusted_mode);
 
 	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
 	I915_READ(dvo_reg);
@@ -235,7 +238,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_dvo_compute_config(struct intel_encoder *encoder,
-				     struct intel_crtc_state *pipe_config)
+				     struct intel_crtc_state *pipe_config,
+				     struct drm_connector_state *conn_state)
 {
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	const struct drm_display_mode *fixed_mode =
@@ -253,12 +257,13 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_dvo_pre_enable(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+				 struct intel_crtc_state *pipe_config,
+				 struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	int pipe = crtc->pipe;
 	u32 dvo_val;
@@ -554,7 +559,6 @@ void intel_dvo_init(struct drm_device *dev)
 		return;
 	}
 
-	drm_encoder_cleanup(&intel_encoder->base);
 	kfree(intel_dvo);
 	kfree(intel_connector);
 }

+ 14 - 1
drivers/gpu/drm/i915/intel_engine_cs.c

@@ -211,6 +211,8 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 {
 	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
 	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+	if (intel_engine_has_waiter(engine))
+		i915_queue_hangcheck(engine->i915);
 }
 
 static void intel_engine_init_requests(struct intel_engine_cs *engine)
@@ -230,7 +232,6 @@ static void intel_engine_init_requests(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&engine->buffers);
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	spin_lock_init(&engine->execlist_lock);
 
@@ -306,6 +307,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	return 0;
 }
 
+void intel_engine_reset_irq(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (intel_engine_has_waiter(engine))
+		engine->irq_enable(engine);
+	else
+		engine->irq_disable(engine);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
 /**
  * intel_engines_cleanup_common - cleans up the engine state created by
  *                                the common initializers.

+ 2 - 4
drivers/gpu/drm/i915/intel_fbc.c

@@ -799,10 +799,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	 */
 	if (cache->fb.tiling_mode != I915_TILING_X ||
 	    cache->fb.fence_reg == I915_FENCE_REG_NONE) {
-		if (INTEL_GEN(dev_priv) < 5) {
-			fbc->no_fbc_reason = "framebuffer not tiled or fenced";
-			return false;
-		}
+		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+		return false;
 	}
 	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
 	    cache->plane.rotation != DRM_ROTATE_0) {

+ 2 - 1
drivers/gpu/drm/i915/intel_fbdev.c

@@ -183,6 +183,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	struct intel_framebuffer *intel_fb = ifbdev->fb;
 	struct drm_device *dev = helper->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
@@ -280,7 +281,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	ifbdev->vma = vma;
 
 	mutex_unlock(&dev->struct_mutex);
-	vga_switcheroo_client_fb_set(dev->pdev, info);
+	vga_switcheroo_client_fb_set(pdev, info);
 	return 0;
 
 out_destroy_fbi:

+ 3 - 1
drivers/gpu/drm/i915/intel_guc.h

@@ -78,9 +78,11 @@ struct i915_guc_client {
 	uint16_t doorbell_id;
 	uint16_t padding[3];		/* Maintain alignment		*/
 
+	spinlock_t wq_lock;
 	uint32_t wq_offset;
 	uint32_t wq_size;
 	uint32_t wq_tail;
+	uint32_t wq_rsvd;
 	uint32_t no_wq_space;
 	uint32_t b_fail;
 	int retcode;
@@ -157,7 +159,7 @@ extern int intel_guc_resume(struct drm_device *dev);
 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 

+ 56 - 35
drivers/gpu/drm/i915/intel_guc_loader.c

@@ -97,7 +97,7 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 	}
 };
 
-static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 	int irqs;
@@ -114,7 +114,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 	I915_WRITE(GUC_WD_VECS_IER, 0);
 }
 
-static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 	int irqs;
@@ -134,13 +134,28 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
 
 	/*
-	 * If GuC has routed PM interrupts to itself, don't keep it.
-	 * and keep other interrupts those are unmasked by GuC.
-	*/
+	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+	 * (unmasked) PM interrupts to the GuC. All other bits of this
+	 * register *disable* generation of a specific interrupt.
+	 *
+	 * 'pm_intr_keep' indicates bits that are NOT to be set when
+	 * writing to the PM interrupt mask register, i.e. interrupts
+	 * that must not be disabled.
+	 *
+	 * If the GuC is handling these interrupts, then we must not let
+	 * the PM code disable ANY interrupt that the GuC is expecting.
+	 * So for each ENABLED (0) bit in this register, we must SET the
+	 * bit in pm_intr_keep so that it's left enabled for the GuC.
+	 *
+	 * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
+	 * (so interrupts go to the DISPLAY unit at first); but here we
+	 * need to CLEAR that bit, which will result in the register bit
+	 * being left SET!
+	 */
 	tmp = I915_READ(GEN6_PMINTRMSK);
-	if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
-		dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+	if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
+		dev_priv->rps.pm_intr_keep |= ~tmp;
+		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
 	}
 }
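
Concretely, for each bit the GuC left enabled (0) in PMINTRMSK, the complement sets the corresponding keep bit, and the redirect bit is then cleared again. A standalone sketch of the bit math; the redirect bit position is illustrative, since this hunk does not show its definition:

#include <stdio.h>

#define REDIRECT_TO_GUC (1u << 31)	/* illustrative bit position */

int main(void)
{
	/* PMINTRMSK readback: redirect set, bits 1 and 2 enabled (0) */
	unsigned int tmp = REDIRECT_TO_GUC | ~0x6u;
	unsigned int pm_intr_keep = REDIRECT_TO_GUC;

	if (tmp & REDIRECT_TO_GUC) {
		pm_intr_keep |= ~tmp;			/* keep GuC's bits */
		pm_intr_keep &= ~REDIRECT_TO_GUC;	/* drop redirect */
	}
	printf("pm_intr_keep = %#x\n", pm_intr_keep);	/* prints 0x6 */
	return 0;
}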
 
@@ -152,17 +167,24 @@ static u32 get_gttype(struct drm_i915_private *dev_priv)
 
 static u32 get_core_family(struct drm_i915_private *dev_priv)
 {
-	switch (INTEL_INFO(dev_priv)->gen) {
+	u32 gen = INTEL_GEN(dev_priv);
+
+	switch (gen) {
 	case 9:
 		return GFXCORE_FAMILY_GEN9;
 
 	default:
-		DRM_ERROR("GUC: unsupported core family\n");
+		WARN(1, "GEN%d does not support GuC operation!\n", gen);
 		return GFXCORE_FAMILY_UNKNOWN;
 	}
 }
 
-static void set_guc_init_params(struct drm_i915_private *dev_priv)
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+static void guc_params_init(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	u32 params[GUC_CTL_MAX_DWORDS];
@@ -375,11 +397,11 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
 					    I915_READ(GEN7_MISCCPCTL)));
 
-		/* allows for 5us before GT can go to RC6 */
+		/* allows for 5us (in 10ns units) before GT can go to RC6 */
 		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
 	}
 
-	set_guc_init_params(dev_priv);
+	guc_params_init(dev_priv);
 
 	ret = guc_ucode_xfer_dma(dev_priv, vma);
 
@@ -394,7 +416,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static int i915_reset_guc(struct drm_i915_private *dev_priv)
+static int guc_hw_reset(struct drm_i915_private *dev_priv)
 {
 	int ret;
 	u32 guc_status;
@@ -447,7 +469,7 @@ int intel_guc_setup(struct drm_device *dev)
 		goto fail;
 	} else if (*fw_path == '\0') {
 		/* Device has a GuC but we don't know what f/w to load? */
-		DRM_INFO("No GuC firmware known for this platform\n");
+		WARN(1, "No GuC firmware known for this platform!\n");
 		err = -ENODEV;
 		goto fail;
 	}
@@ -461,7 +483,7 @@ int intel_guc_setup(struct drm_device *dev)
 		goto fail;
 	}
 
-	direct_interrupts_to_host(dev_priv);
+	guc_interrupts_release(dev_priv);
 
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
 
@@ -484,11 +506,9 @@ int intel_guc_setup(struct drm_device *dev)
 		 * Always reset the GuC just before (re)loading, so
 		 * that the state and timing are fairly predictable
 		 */
-		err = i915_reset_guc(dev_priv);
-		if (err) {
-			DRM_ERROR("GuC reset failed: %d\n", err);
+		err = guc_hw_reset(dev_priv);
+		if (err)
 			goto fail;
-		}
 
 		err = guc_ucode_xfer(dev_priv);
 		if (!err)
@@ -511,7 +531,7 @@ int intel_guc_setup(struct drm_device *dev)
 		err = i915_guc_submission_enable(dev_priv);
 		if (err)
 			goto fail;
-		direct_interrupts_to_guc(dev_priv);
+		guc_interrupts_capture(dev_priv);
 	}
 
 	return 0;
@@ -520,7 +540,7 @@ fail:
 	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
 		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
-	direct_interrupts_to_host(dev_priv);
+	guc_interrupts_release(dev_priv);
 	i915_guc_submission_disable(dev_priv);
 	i915_guc_submission_fini(dev_priv);
 
@@ -546,15 +566,15 @@ fail:
 	else if (err == 0)
 		DRM_INFO("GuC firmware load skipped\n");
 	else if (ret != -EIO)
-		DRM_INFO("GuC firmware load failed: %d\n", err);
+		DRM_NOTE("GuC firmware load failed: %d\n", err);
 	else
-		DRM_ERROR("GuC firmware load failed: %d\n", err);
+		DRM_WARN("GuC firmware load failed: %d\n", err);
 
 	if (i915.enable_guc_submission) {
 		if (fw_path == NULL)
 			DRM_INFO("GuC submission without firmware not supported\n");
 		if (ret == 0)
-			DRM_INFO("Falling back from GuC submission to execlist mode\n");
+			DRM_NOTE("Falling back from GuC submission to execlist mode\n");
 		else
 			DRM_ERROR("GuC init failed: %d\n", ret);
 	}
@@ -565,6 +585,7 @@ fail:
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 {
+	struct pci_dev *pdev = dev->pdev;
 	struct drm_i915_gem_object *obj;
 	const struct firmware *fw;
 	struct guc_css_header *css;
@@ -574,7 +595,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
 		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
 
-	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+	err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
 	if (err)
 		goto fail;
 	if (!fw)
@@ -585,7 +606,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
 	/* Check the size of the blob before examining buffer contents */
 	if (fw->size < sizeof(struct guc_css_header)) {
-		DRM_ERROR("Firmware header is missing\n");
+		DRM_NOTE("Firmware header is missing\n");
 		goto fail;
 	}
 
@@ -597,7 +618,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
 
 	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
-		DRM_ERROR("CSS header definition mismatch\n");
+		DRM_NOTE("CSS header definition mismatch\n");
 		goto fail;
 	}
 
@@ -607,7 +628,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
 	/* now RSA */
 	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-		DRM_ERROR("RSA key size is bad\n");
+		DRM_NOTE("RSA key size is bad\n");
 		goto fail;
 	}
 	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
@@ -616,14 +637,14 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 	/* At least, it should have header, uCode and RSA. Size of all three. */
 	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
 	if (fw->size < size) {
-		DRM_ERROR("Missing firmware components\n");
+		DRM_NOTE("Missing firmware components\n");
 		goto fail;
 	}
 
 	/* Header and uCode will be loaded to WOPCM. Size of the two. */
 	size = guc_fw->header_size + guc_fw->ucode_size;
 	if (size > guc_wopcm_size(to_i915(dev))) {
-		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+		DRM_NOTE("Firmware is too large to fit in WOPCM\n");
 		goto fail;
 	}
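
The checks above pin down the blob layout: a CSS header, immediately followed by the uCode image, followed by the RSA signature, with header+uCode additionally bounded by the WOPCM size. A standalone sketch of the offset arithmetic, with all sizes made up for illustration:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* all sizes illustrative, in bytes */
	size_t header = 128, ucode = 142 * 1024, rsa = 256;
	size_t wopcm = 512 * 1024;

	size_t ucode_offset = header;		  /* uCode follows header */
	size_t rsa_offset = ucode_offset + ucode; /* RSA follows uCode */

	printf("blob=%zu bytes, rsa@%zu, header+ucode fits WOPCM: %d\n",
	       header + ucode + rsa, rsa_offset,
	       header + ucode <= wopcm);
	return 0;
}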
 
@@ -638,7 +659,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
 	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
 	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
-		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+		DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
 			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
 			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
 		err = -ENOEXEC;
@@ -668,10 +689,10 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 	return;
 
 fail:
+	DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
+		 guc_fw->guc_fw_path, err);
 	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
 		err, fw, guc_fw->guc_fw_obj);
-	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
-		  guc_fw->guc_fw_path, err);
 
 	mutex_lock(&dev->struct_mutex);
 	obj = guc_fw->guc_fw_obj;
@@ -752,7 +773,7 @@ void intel_guc_fini(struct drm_device *dev)
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
 	mutex_lock(&dev->struct_mutex);
-	direct_interrupts_to_host(dev_priv);
+	guc_interrupts_release(dev_priv);
 	i915_guc_submission_disable(dev_priv);
 	i915_guc_submission_fini(dev_priv);
 

+ 64 - 54
drivers/gpu/drm/i915/intel_hdmi.c

@@ -985,7 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
 	intel_audio_codec_enable(encoder);
 }
 
-static void g4x_enable_hdmi(struct intel_encoder *encoder)
+static void g4x_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1006,7 +1008,9 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
 		intel_enable_hdmi_audio(encoder);
 }
 
-static void ibx_enable_hdmi(struct intel_encoder *encoder)
+static void ibx_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1055,7 +1059,9 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
 		intel_enable_hdmi_audio(encoder);
 }
 
-static void cpt_enable_hdmi(struct intel_encoder *encoder)
+static void cpt_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1108,11 +1114,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder)
 		intel_enable_hdmi_audio(encoder);
 }
 
-static void vlv_enable_hdmi(struct intel_encoder *encoder)
+static void vlv_enable_hdmi(struct intel_encoder *encoder,
+			    struct intel_crtc_state *pipe_config,
+			    struct drm_connector_state *conn_state)
 {
 }
 
-static void intel_disable_hdmi(struct intel_encoder *encoder)
+static void intel_disable_hdmi(struct intel_encoder *encoder,
+			       struct intel_crtc_state *old_crtc_state,
+			       struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1164,17 +1174,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }
 
-static void g4x_disable_hdmi(struct intel_encoder *encoder)
+static void g4x_disable_hdmi(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
 	if (crtc->config->has_audio)
 		intel_audio_codec_disable(encoder);
 
-	intel_disable_hdmi(encoder);
+	intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_hdmi(struct intel_encoder *encoder)
+static void pch_disable_hdmi(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -1182,9 +1196,11 @@ static void pch_disable_hdmi(struct intel_encoder *encoder)
 		intel_audio_codec_disable(encoder);
 }
 
-static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
-	intel_disable_hdmi(encoder);
+	intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
 }
 
 static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
@@ -1285,7 +1301,8 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 }
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-			       struct intel_crtc_state *pipe_config)
+			       struct intel_crtc_state *pipe_config,
+			       struct drm_connector_state *conn_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -1422,24 +1439,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
 }
 
 static bool
-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
+intel_hdmi_set_edid(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-	struct edid *edid = NULL;
+	struct edid *edid;
 	bool connected = false;
 
-	if (force) {
-		intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-		edid = drm_get_edid(connector,
-				    intel_gmbus_get_adapter(dev_priv,
-				    intel_hdmi->ddc_bus));
+	edid = drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+			    intel_hdmi->ddc_bus));
 
-		intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+	intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
 
-		intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
-	}
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
 	to_intel_connector(connector)->detect_edid = edid;
 	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1465,37 +1480,16 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	enum drm_connector_status status;
-	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	bool live_status = false;
-	unsigned int try;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
 	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-	for (try = 0; !live_status && try < 9; try++) {
-		if (try)
-			msleep(10);
-		live_status = intel_digital_port_connected(dev_priv,
-				hdmi_to_dig_port(intel_hdmi));
-	}
-
-	if (!live_status) {
-		DRM_DEBUG_KMS("HDMI live status down\n");
-		/*
-		 * Live status register is not reliable on all intel platforms.
-		 * So consider live_status only for certain platforms, for
-		 * others, read EDID to determine presence of sink.
-		 */
-		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
-			live_status = true;
-	}
-
 	intel_hdmi_unset_edid(connector);
 
-	if (intel_hdmi_set_edid(connector, live_status)) {
+	if (intel_hdmi_set_edid(connector)) {
 		struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
 		hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1521,7 +1515,7 @@ intel_hdmi_force(struct drm_connector *connector)
 	if (connector->status != connector_status_connected)
 		return;
 
-	intel_hdmi_set_edid(connector, true);
+	intel_hdmi_set_edid(connector);
 	hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
 }
 
@@ -1638,7 +1632,9 @@ done:
 	return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1651,7 +1647,9 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
 				   adjusted_mode);
 }
 
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1671,37 +1669,47 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	g4x_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
 
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	intel_hdmi_prepare(encoder);
 
 	vlv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+				    struct intel_crtc_state *pipe_config,
+				    struct drm_connector_state *conn_state)
 {
 	intel_hdmi_prepare(encoder);
 
 	chv_phy_pre_pll_enable(encoder);
 }
 
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+				      struct intel_crtc_state *old_crtc_state,
+				      struct drm_connector_state *old_conn_state)
 {
 	chv_phy_post_pll_disable(encoder);
 }
 
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	vlv_phy_reset_lanes(encoder);
 }
 
-static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1714,7 +1722,9 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1734,7 +1744,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	g4x_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder, pipe_config, conn_state);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 

+ 65 - 69
drivers/gpu/drm/i915/intel_i2c.c

@@ -255,67 +255,59 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
 	algo->data = bus;
 }
 
-static int
-gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
-		     u32 gmbus2_status,
-		     u32 gmbus4_irq_en)
+static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
 {
-	int i;
-	u32 gmbus2 = 0;
 	DEFINE_WAIT(wait);
-
-	if (!HAS_GMBUS_IRQ(dev_priv))
-		gmbus4_irq_en = 0;
+	u32 gmbus2;
+	int ret;
 
 	/* Important: The hw handles only the first bit, so set only one! Since
 	 * we also need to check for NAKs besides the hw ready/idle signal, we
-	 * need to wake up periodically and check that ourselves. */
-	I915_WRITE(GMBUS4, gmbus4_irq_en);
-
-	for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
-		prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
-				TASK_UNINTERRUPTIBLE);
+	 * need to wake up periodically and check that ourselves.
+	 */
+	if (!HAS_GMBUS_IRQ(dev_priv))
+		irq_en = 0;
 
-		gmbus2 = I915_READ_NOTRACE(GMBUS2);
-		if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
-			break;
+	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	I915_WRITE_FW(GMBUS4, irq_en);
 
-		schedule_timeout(1);
-	}
-	finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+	status |= GMBUS_SATOER;
+	ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+	if (ret)
+		ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
 
-	I915_WRITE(GMBUS4, 0);
+	I915_WRITE_FW(GMBUS4, 0);
+	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 
 	if (gmbus2 & GMBUS_SATOER)
 		return -ENXIO;
-	if (gmbus2 & gmbus2_status)
-		return 0;
-	return -ETIMEDOUT;
+
+	return ret;
 }
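
gmbus_wait() now waits in two stages: a ~2us busy-poll for the common fast case, then a sleeping wait of up to 50ms; GMBUS_SATOER is OR'd into the status mask so a slave NAK terminates either stage and is mapped to -ENXIO afterwards. A plain userspace sketch of the two-stage pattern, where status_ready() stands in for reading GMBUS2:

#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long elapsed_us(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000000L +
	       (now.tv_nsec - start->tv_nsec) / 1000L;
}

/* stand-in for "GMBUS2 shows the wanted status bit or a slave NAK" */
static bool status_ready(void)
{
	static int polls;

	return ++polls > 3;	/* pretend it asserts on the 4th read */
}

static int gmbus_wait_sketch(void)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {				/* fast path: ~2us busy-poll */
		if (status_ready())
			return 0;
	} while (elapsed_us(&start) < 2);

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {				/* slow path: sleep, 50ms budget */
		if (status_ready())
			return 0;
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	} while (elapsed_us(&start) < 50000);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("ret=%d\n", gmbus_wait_sketch());
	return 0;
}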
 
 static int
 gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
+	DEFINE_WAIT(wait);
+	u32 irq_enable;
 	int ret;
 
-	if (!HAS_GMBUS_IRQ(dev_priv))
-		return intel_wait_for_register(dev_priv,
-					       GMBUS2, GMBUS_ACTIVE, 0,
-					       10);
-
 	/* Important: The hw handles only the first bit, so set only one! */
-	I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
+	irq_enable = 0;
+	if (HAS_GMBUS_IRQ(dev_priv))
+		irq_enable = GMBUS_IDLE_EN;
 
-	ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
-				 (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
-				 msecs_to_jiffies_timeout(10));
+	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+	I915_WRITE_FW(GMBUS4, irq_enable);
 
-	I915_WRITE(GMBUS4, 0);
+	ret = intel_wait_for_register_fw(dev_priv,
+					 GMBUS2, GMBUS_ACTIVE, 0,
+					 10);
 
-	if (ret)
-		return 0;
-	else
-		return -ETIMEDOUT;
+	I915_WRITE_FW(GMBUS4, 0);
+	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+
+	return ret;
 }
 
 static int
@@ -323,22 +315,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 		      unsigned short addr, u8 *buf, unsigned int len,
 		      u32 gmbus1_index)
 {
-	I915_WRITE(GMBUS1,
-		   gmbus1_index |
-		   GMBUS_CYCLE_WAIT |
-		   (len << GMBUS_BYTE_COUNT_SHIFT) |
-		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+	I915_WRITE_FW(GMBUS1,
+		      gmbus1_index |
+		      GMBUS_CYCLE_WAIT |
+		      (len << GMBUS_BYTE_COUNT_SHIFT) |
+		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		      GMBUS_SLAVE_READ | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
 		u32 val, loop = 0;
 
-		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-					   GMBUS_HW_RDY_EN);
+		ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
 		if (ret)
 			return ret;
 
-		val = I915_READ(GMBUS3);
+		val = I915_READ_FW(GMBUS3);
 		do {
 			*buf++ = val & 0xff;
 			val >>= 8;
@@ -385,12 +376,12 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
 		len -= 1;
 	}
 
-	I915_WRITE(GMBUS3, val);
-	I915_WRITE(GMBUS1,
-		   GMBUS_CYCLE_WAIT |
-		   (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
-		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+	I915_WRITE_FW(GMBUS3, val);
+	I915_WRITE_FW(GMBUS1,
+		      GMBUS_CYCLE_WAIT |
+		      (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		      GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
 
@@ -399,10 +390,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
 			val |= *buf++ << (8 * loop);
 		} while (--len && ++loop < 4);
 
-		I915_WRITE(GMBUS3, val);
+		I915_WRITE_FW(GMBUS3, val);
 
-		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
-					   GMBUS_HW_RDY_EN);
+		ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
 		if (ret)
 			return ret;
 	}
@@ -460,13 +450,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
 
 	/* GMBUS5 holds 16-bit index */
 	if (gmbus5)
-		I915_WRITE(GMBUS5, gmbus5);
+		I915_WRITE_FW(GMBUS5, gmbus5);
 
 	ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
 
 	/* Clear GMBUS5 after each index transfer */
 	if (gmbus5)
-		I915_WRITE(GMBUS5, 0);
+		I915_WRITE_FW(GMBUS5, 0);
 
 	return ret;
 }
@@ -478,11 +468,15 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
+	const unsigned int fw =
+		intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
+					       FW_REG_READ | FW_REG_WRITE);
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
+	intel_uncore_forcewake_get(dev_priv, fw);
 retry:
-	I915_WRITE(GMBUS0, bus->reg0);
+	I915_WRITE_FW(GMBUS0, bus->reg0);
 
 	for (; i < num; i += inc) {
 		inc = 1;
@@ -496,8 +490,8 @@ retry:
 		}
 
 		if (!ret)
-			ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
-						   GMBUS_HW_WAIT_EN);
+			ret = gmbus_wait(dev_priv,
+					 GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
 		if (ret == -ETIMEDOUT)
 			goto timeout;
 		else if (ret)
@@ -508,7 +502,7 @@ retry:
 	 * a STOP on the very first cycle. To simplify the code we
 	 * unconditionally generate the STOP condition with an additional gmbus
 	 * cycle. */
-	I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+	I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
 	/* Mark the GMBUS interface as disabled after waiting for idle.
 	 * We will re-enable it at the start of the next xfer,
@@ -519,7 +513,7 @@ retry:
 			 adapter->name);
 		ret = -ETIMEDOUT;
 	}
-	I915_WRITE(GMBUS0, 0);
+	I915_WRITE_FW(GMBUS0, 0);
 	ret = ret ?: i;
 	goto out;
 
@@ -548,9 +542,9 @@ clear_err:
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
 	 */
-	I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
-	I915_WRITE(GMBUS1, 0);
-	I915_WRITE(GMBUS0, 0);
+	I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
+	I915_WRITE_FW(GMBUS1, 0);
+	I915_WRITE_FW(GMBUS0, 0);
 
 	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
 			 adapter->name, msgs[i].addr,
@@ -573,7 +567,7 @@ clear_err:
 timeout:
 	DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
 		      bus->adapter.name, bus->reg0 & 0xff);
-	I915_WRITE(GMBUS0, 0);
+	I915_WRITE_FW(GMBUS0, 0);
 
 	/*
 	 * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -582,6 +576,7 @@ timeout:
 	ret = -EAGAIN;
 
 out:
+	intel_uncore_forcewake_put(dev_priv, fw);
 	return ret;
 }
 
@@ -633,6 +628,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
 int intel_setup_gmbus(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct intel_gmbus *bus;
 	unsigned int pin;
 	int ret;
@@ -663,7 +659,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 			 "i915 gmbus %s",
 			 get_gmbus_pin(dev_priv, pin)->name);
 
-		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->adapter.dev.parent = &pdev->dev;
 		bus->dev_priv = dev_priv;
 
 		bus->adapter.algo = &gmbus_algorithm;

+ 246 - 294
drivers/gpu/drm/i915/intel_lrc.c

@@ -156,6 +156,11 @@
 #define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
 #define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
 
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+	 (GEN8_CTX_STATUS_ACTIVE_IDLE | \
+	  GEN8_CTX_STATUS_PREEMPTED | \
+	  GEN8_CTX_STATUS_ELEMENT_SWITCH)
+
 #define CTX_LRI_HEADER_0		0x01
 #define CTX_CONTEXT_CONTROL		0x02
 #define CTX_RING_HEAD			0x04
@@ -263,12 +268,10 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
-		engine->idle_lite_restore_wa = ~0;
-
-	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
-					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
-					(engine->id == VCS || engine->id == VCS2);
+	engine->disable_lite_restore_wa =
+		(IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+		 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+		(engine->id == VCS || engine->id == VCS2);
 
 	engine->ctx_desc_template = GEN8_CTX_VALID;
 	if (IS_GEN8(dev_priv))
@@ -328,34 +331,18 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 	return ctx->engine[engine->id].lrc_desc;
 }
 
-static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
-				 struct drm_i915_gem_request *rq1)
+static inline void
+execlists_context_status_change(struct drm_i915_gem_request *rq,
+				unsigned long status)
 {
+	/*
+	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
+	 * the compiler should eliminate this function as dead code.
+	 */
+	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+		return;
 
-	struct intel_engine_cs *engine = rq0->engine;
-	struct drm_i915_private *dev_priv = rq0->i915;
-	uint64_t desc[2];
-
-	if (rq1) {
-		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
-		rq1->elsp_submitted++;
-	} else {
-		desc[1] = 0;
-	}
-
-	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
-	rq0->elsp_submitted++;
-
-	/* You must always write both descriptors in the order below. */
-	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
-	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
-
-	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
-	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
-
-	/* ELSP is a wo register, use another nearby reg for posting */
-	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
+	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
 }
 
 static void
@@ -367,11 +354,11 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 }
 
-static void execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+	u32 *reg_state = ce->lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
 
@@ -382,158 +369,163 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
 	 */
 	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
 		execlists_update_context_pdps(ppgtt, reg_state);
+
+	return ce->lrc_desc;
 }
 
-static void execlists_elsp_submit_contexts(struct drm_i915_gem_request *rq0,
-					   struct drm_i915_gem_request *rq1)
+static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = rq0->i915;
-	unsigned int fw_domains = rq0->engine->fw_domains;
-
-	execlists_update_context(rq0);
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct execlist_port *port = engine->execlist_port;
+	u32 __iomem *elsp =
+		dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+	u64 desc[2];
 
-	if (rq1)
-		execlists_update_context(rq1);
+	if (!port[0].count)
+		execlists_context_status_change(port[0].request,
+						INTEL_CONTEXT_SCHEDULE_IN);
+	desc[0] = execlists_update_context(port[0].request);
+	engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
 
-	spin_lock_irq(&dev_priv->uncore.lock);
-	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+	if (port[1].request) {
+		GEM_BUG_ON(port[1].count);
+		execlists_context_status_change(port[1].request,
+						INTEL_CONTEXT_SCHEDULE_IN);
+		desc[1] = execlists_update_context(port[1].request);
+		port[1].count = 1;
+	} else {
+		desc[1] = 0;
+	}
+	GEM_BUG_ON(desc[0] == desc[1]);
 
-	execlists_elsp_write(rq0, rq1);
+	/* You must always write both descriptors in the order below. */
+	writel(upper_32_bits(desc[1]), elsp);
+	writel(lower_32_bits(desc[1]), elsp);
 
-	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
-	spin_unlock_irq(&dev_priv->uncore.lock);
+	writel(upper_32_bits(desc[0]), elsp);
+	/* The context is automatically loaded after the following */
+	writel(lower_32_bits(desc[0]), elsp);
 }
 
-static inline void execlists_context_status_change(
-		struct drm_i915_gem_request *rq,
-		unsigned long status)
+static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
 {
-	/*
-	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
-	 * The compiler should eliminate this function as dead-code.
-	 */
-	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
-		return;
-
-	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+		ctx->execlists_force_single_submission);
 }
 
-static void execlists_unqueue(struct intel_engine_cs *engine)
+static bool can_merge_ctx(const struct i915_gem_context *prev,
+			  const struct i915_gem_context *next)
 {
-	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
-	struct drm_i915_gem_request *cursor, *tmp;
+	if (prev != next)
+		return false;
 
-	assert_spin_locked(&engine->execlist_lock);
+	if (ctx_single_port_submission(prev))
+		return false;
 
-	/*
-	 * If irqs are not active generate a warning as batches that finish
-	 * without the irqs may get lost and a GPU Hang may occur.
-	 */
-	WARN_ON(!intel_irqs_enabled(engine->i915));
-
-	/* Try to read in pairs */
-	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
-				 execlist_link) {
-		if (!req0) {
-			req0 = cursor;
-		} else if (req0->ctx == cursor->ctx) {
-			/* Same ctx: ignore first request, as second request
-			 * will update tail past first request's workload */
-			cursor->elsp_submitted = req0->elsp_submitted;
-			list_del(&req0->execlist_link);
-			i915_gem_request_put(req0);
-			req0 = cursor;
-		} else {
-			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
-				/*
-				 * req0 (after merged) ctx requires single
-				 * submission, stop picking
-				 */
-				if (req0->ctx->execlists_force_single_submission)
-					break;
-				/*
-				 * req0 ctx doesn't require single submission,
-				 * but next req ctx requires, stop picking
-				 */
-				if (cursor->ctx->execlists_force_single_submission)
-					break;
-			}
-			req1 = cursor;
-			WARN_ON(req1->elsp_submitted);
-			break;
-		}
-	}
+	return true;
+}
 
-	if (unlikely(!req0))
-		return;
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *cursor, *last;
+	struct execlist_port *port = engine->execlist_port;
+	bool submit = false;
+
+	last = port->request;
+	if (last)
+		/* WaIdleLiteRestore:bdw,skl
+		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
+		 * as we resubmit the request. See gen8_emit_request()
+		 * for where we prepare the padding after the end of the
+		 * request.
+		 */
+		last->tail = last->wa_tail;
 
-	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+	GEM_BUG_ON(port[1].request);
 
-	if (req1)
-		execlists_context_status_change(req1,
-						INTEL_CONTEXT_SCHEDULE_IN);
+	/* Hardware submission is through 2 ports. Conceptually each port
+	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+	 * static for a context, and unique to each, so we only execute
+	 * requests belonging to a single context from each ring. RING_HEAD
+	 * is maintained by the CS in the context image; it marks the place
+	 * it got up to last time, and through RING_TAIL we tell the CS
+	 * where we want to execute up to this time.
+	 *
+	 * In this list the requests are in order of execution. Consecutive
+	 * requests from the same context are adjacent in the ringbuffer. We
+	 * can combine these requests into a single RING_TAIL update:
+	 *
+	 *              RING_HEAD...req1...req2
+	 *                                    ^- RING_TAIL
+	 * since to execute req2 the CS must first execute req1.
+	 *
+	 * Our goal then is to point each port at the end of a consecutive
+	 * sequence of requests, as that gives the optimal submission
+	 * (fewest wake-ups and context switches).
+	 */
 
-	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
-		/*
-		 * WaIdleLiteRestore: make sure we never cause a lite restore
-		 * with HEAD==TAIL.
+	spin_lock(&engine->execlist_lock);
+	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+		/* Can we combine this request with the current port? It has to
+		 * be the same context/ringbuffer and not have any exceptions
+		 * (e.g. GVT saying never to combine contexts).
 		 *
-		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
-		 * resubmit the request. See gen8_emit_request() for where we
-		 * prepare the padding after the end of the request.
+		 * If we can combine the requests, we can execute both by
+		 * updating the RING_TAIL to point to the end of the second
+		 * request, and so we never need to tell the hardware about
+		 * the first.
 		 */
-		req0->tail += 8;
-		req0->tail &= req0->ring->size - 1;
+		if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+			/* If we are on the second port and cannot combine
+			 * this request with the last, then we are done.
+			 */
+			if (port != engine->execlist_port)
+				break;
+
+			/* If GVT overrides us we only ever submit port[0],
+			 * leaving port[1] empty. Note that we also have
+			 * to be careful that we don't queue the same
+			 * context (even though a different request) to
+			 * the second port.
+			 */
+			if (ctx_single_port_submission(cursor->ctx))
+				break;
+
+			GEM_BUG_ON(last->ctx == cursor->ctx);
+
+			i915_gem_request_assign(&port->request, last);
+			port++;
+		}
+		last = cursor;
+		submit = true;
+	}
+	if (submit) {
+		/* Decouple all the requests submitted from the queue */
+		engine->execlist_queue.next = &cursor->execlist_link;
+		cursor->execlist_link.prev = &engine->execlist_queue;
+
+		i915_gem_request_assign(&port->request, last);
 	}
+	spin_unlock(&engine->execlist_lock);
 
-	execlists_elsp_submit_contexts(req0, req1);
+	if (submit)
+		execlists_submit_ports(engine);
 }
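
An aside on the dequeue policy above: consecutive requests from one context collapse into a single port entry (only the last request's tail matters), and the walk stops as soon as a second context change would need a third port. A worked trace of that policy over a hypothetical queue, assuming no GVT single-submission override:

	/*
	 * execlist_queue: A1 -> A2 -> B1 -> B2 -> C1   (A, B, C = contexts)
	 *
	 * port[0] <- A2   (A1 merged away: same context, A2's tail covers it)
	 * port[1] <- B2   (B1 merged away)
	 * C1 stays queued: both ports are taken, so the walk stops.
	 */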
 
-static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
+static bool execlists_elsp_idle(struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_request *head_req;
-
-	assert_spin_locked(&engine->execlist_lock);
-
-	head_req = list_first_entry_or_null(&engine->execlist_queue,
-					    struct drm_i915_gem_request,
-					    execlist_link);
-
-	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
-               return 0;
-
-	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
-
-	if (--head_req->elsp_submitted > 0)
-		return 0;
-
-	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
-
-	list_del(&head_req->execlist_link);
-	i915_gem_request_put(head_req);
-
-	return 1;
+	return !engine->execlist_port[0].request;
 }
 
-static u32
-get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
-		   u32 *context_id)
+static bool execlists_elsp_ready(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	u32 status;
-
-	read_pointer %= GEN8_CSB_ENTRIES;
-
-	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
-
-	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-		return 0;
+	int port;
 
-	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
-							      read_pointer));
+	port = 1; /* wait for a free slot */
+	if (engine->disable_lite_restore_wa || engine->preempt_wa)
+		port = 0; /* wait for GPU to be idle before continuing */
 
-	return status;
+	return !engine->execlist_port[port].request;
 }
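
Both execlists_context_status_change() and ctx_single_port_submission() earlier in this file lean on IS_ENABLED() being a compile-time constant: with CONFIG_DRM_I915_GVT=n the test folds to an unconditional early-out and the GVT path is discarded entirely. A stand-alone sketch of the pattern, using a hypothetical CONFIG_FOO and foo_notify():

	static inline void foo_status_change(int status)
	{
		/* Folds to a bare 'return' when CONFIG_FOO is not set,
		 * so foo_notify() is never emitted in that build.
		 */
		if (!IS_ENABLED(CONFIG_FOO))
			return;

		foo_notify(status);
	}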
 
 /*
@@ -543,103 +535,70 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 static void intel_lrc_irq_handler(unsigned long data)
 {
 	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct execlist_port *port = engine->execlist_port;
 	struct drm_i915_private *dev_priv = engine->i915;
-	u32 status_pointer;
-	unsigned int read_pointer, write_pointer;
-	u32 csb[GEN8_CSB_ENTRIES][2];
-	unsigned int csb_read = 0, i;
-	unsigned int submit_contexts = 0;
 
 	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
-
-	read_pointer = engine->next_context_status_buffer;
-	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
-	if (read_pointer > write_pointer)
-		write_pointer += GEN8_CSB_ENTRIES;
-
-	while (read_pointer < write_pointer) {
-		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
-			break;
-		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
-						      &csb[csb_read][1]);
-		csb_read++;
-	}
-
-	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
-	/* Update the read pointer to the old write pointer. Manual ringbuffer
-	 * management ftw </sarcasm> */
-	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
-		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-				    engine->next_context_status_buffer << 8));
-
-	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
-
-	spin_lock(&engine->execlist_lock);
+	if (!execlists_elsp_idle(engine)) {
+		u32 __iomem *csb_mmio =
+			dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+		u32 __iomem *buf =
+			dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+		unsigned int csb, head, tail;
+
+		csb = readl(csb_mmio);
+		head = GEN8_CSB_READ_PTR(csb);
+		tail = GEN8_CSB_WRITE_PTR(csb);
+		if (tail < head)
+			tail += GEN8_CSB_ENTRIES;
+		while (head < tail) {
+			unsigned int idx = ++head % GEN8_CSB_ENTRIES;
+			unsigned int status = readl(buf + 2 * idx);
+
+			if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+				continue;
+
+			GEM_BUG_ON(port[0].count == 0);
+			if (--port[0].count == 0) {
+				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+				execlists_context_status_change(port[0].request,
+								INTEL_CONTEXT_SCHEDULE_OUT);
+
+				i915_gem_request_put(port[0].request);
+				port[0] = port[1];
+				memset(&port[1], 0, sizeof(port[1]));
+
+				engine->preempt_wa = false;
+			}
 
-	for (i = 0; i < csb_read; i++) {
-		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
-			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
-				if (execlists_check_remove_request(engine, csb[i][1]))
-					WARN(1, "Lite Restored request removed from queue\n");
-			} else
-				WARN(1, "Preemption without Lite Restore\n");
+			GEM_BUG_ON(port[0].count == 0 &&
+				   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
 		}
 
-		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
-		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
-			submit_contexts +=
-				execlists_check_remove_request(engine, csb[i][1]);
-	}
-
-	if (submit_contexts) {
-		if (!engine->disable_lite_restore_wa ||
-		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
-			execlists_unqueue(engine);
+		writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+				     GEN8_CSB_WRITE_PTR(csb) << 8),
+		       csb_mmio);
 	}
 
-	spin_unlock(&engine->execlist_lock);
+	if (execlists_elsp_ready(engine))
+		execlists_dequeue(engine);
 
-	if (unlikely(submit_contexts > 2))
-		DRM_ERROR("More than two context complete events?\n");
+	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 }
 
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct drm_i915_gem_request *cursor;
-	int num_elements = 0;
-
-	spin_lock_bh(&engine->execlist_lock);
+	unsigned long flags;
 
-	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
-		if (++num_elements > 2)
-			break;
+	spin_lock_irqsave(&engine->execlist_lock, flags);
 
-	if (num_elements > 2) {
-		struct drm_i915_gem_request *tail_req;
-
-		tail_req = list_last_entry(&engine->execlist_queue,
-					   struct drm_i915_gem_request,
-					   execlist_link);
-
-		if (request->ctx == tail_req->ctx) {
-			WARN(tail_req->elsp_submitted != 0,
-				"More than 2 already-submitted reqs queued\n");
-			list_del(&tail_req->execlist_link);
-			i915_gem_request_put(tail_req);
-		}
-	}
-
-	i915_gem_request_get(request);
 	list_add_tail(&request->execlist_link, &engine->execlist_queue);
-	request->ctx_hw_id = request->ctx->hw_id;
-	if (num_elements == 0)
-		execlists_unqueue(engine);
+	if (execlists_elsp_idle(engine))
+		tasklet_hi_schedule(&engine->irq_tasklet);
 
-	spin_unlock_bh(&engine->execlist_lock);
+	spin_unlock_irqrestore(&engine->execlist_lock, flags);
 }
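
The CSB loop in intel_lrc_irq_handler() above is a producer/consumer walk over a six-entry hardware ring: un-wrap the write pointer once, then advance the read pointer taking each index modulo GEN8_CSB_ENTRIES. A self-contained sketch of just that traversal, where entries[] and process() are hypothetical stand-ins for the MMIO status buffer and the per-event handling:

	#define CSB_ENTRIES 6	/* stands in for GEN8_CSB_ENTRIES */

	static void consume_csb(unsigned int head, unsigned int tail,
				const u32 *entries)
	{
		if (tail < head)
			tail += CSB_ENTRIES;	/* un-wrap so head <= tail */

		while (head < tail) {
			unsigned int idx = ++head % CSB_ENTRIES;

			process(entries[idx]);	/* one context-status event */
		}
	}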
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -668,7 +627,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 		 * going any further, as the i915_add_request() call
 		 * later on mustn't fail ...
 		 */
-		ret = i915_guc_wq_check_space(request);
+		ret = i915_guc_wq_reserve(request);
 		if (ret)
 			return ret;
 	}
@@ -731,6 +690,7 @@ intel_logical_ring_advance(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+	request->wa_tail = ring->tail;
 
 	/* We keep the previous context alive until we retire the following
 	 * request. This ensures that the context object is still pinned
@@ -743,23 +703,6 @@ intel_logical_ring_advance(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
-{
-	struct drm_i915_gem_request *req, *tmp;
-	LIST_HEAD(cancel_list);
-
-	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
-
-	spin_lock_bh(&engine->execlist_lock);
-	list_replace_init(&engine->execlist_queue, &cancel_list);
-	spin_unlock_bh(&engine->execlist_lock);
-
-	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
-		list_del(&req->execlist_link);
-		i915_gem_request_put(req);
-	}
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1280,48 +1223,30 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned int next_context_status_buffer_hw;
+	int ret;
+
+	ret = intel_mocs_init_engine(engine);
+	if (ret)
+		return ret;
 
 	lrc_init_hws(engine);
 
-	I915_WRITE_IMR(engine,
-		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	intel_engine_reset_irq(engine);
+
 	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
 	I915_WRITE(RING_MODE_GEN7(engine),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-	POSTING_READ(RING_MODE_GEN7(engine));
-
-	/*
-	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
-	 * zero, we need to read the write pointer from hardware and use its
-	 * value because "this register is power context save restored".
-	 * Effectively, these states have been observed:
-	 *
-	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
-	 * BDW  | CSB regs not reset       | CSB regs reset       |
-	 * CHT  | CSB regs not reset       | CSB regs not reset   |
-	 * SKL  |         ?                |         ?            |
-	 * BXT  |         ?                |         ?            |
-	 */
-	next_context_status_buffer_hw =
-		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
-
-	/*
-	 * When the CSB registers are reset (also after power-up / gpu reset),
-	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
-	 * this special case, so the first element read is CSB[0].
-	 */
-	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
-		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
-	engine->next_context_status_buffer = next_context_status_buffer_hw;
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
 	intel_engine_init_hangcheck(engine);
 
-	return intel_mocs_init_engine(engine);
+	if (!execlists_elsp_idle(engine))
+		execlists_submit_ports(engine);
+
+	return 0;
 }
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1357,6 +1282,36 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	return init_workarounds_ring(engine);
 }
 
+static void reset_common_ring(struct intel_engine_cs *engine,
+			      struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct execlist_port *port = engine->execlist_port;
+	struct intel_context *ce = &request->ctx->engine[engine->id];
+
+	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+	request->ring->head = request->postfix;
+	request->ring->last_retired_head = -1;
+	intel_ring_update_space(request->ring);
+
+	if (i915.enable_guc_submission)
+		return;
+
+	/* Catch up with any missed context-switch interrupts */
+	I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
+	if (request->ctx != port[0].request->ctx) {
+		i915_gem_request_put(port[0].request);
+		port[0] = port[1];
+		memset(&port[1], 0, sizeof(port[1]));
+	}
+
+	/* CS is stopped, and we will resubmit both ports on resume */
+	GEM_BUG_ON(request->ctx != port[0].request->ctx);
+	port[0].count = 0;
+	port[1].count = 0;
+}
+
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
@@ -1702,10 +1657,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	}
 	intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
-	engine->idle_lite_restore_wa = 0;
-	engine->disable_lite_restore_wa = false;
-	engine->ctx_desc_template = 0;
-
 	lrc_destroy_wa_ctx_obj(engine);
 	engine->i915 = NULL;
 }
@@ -1723,6 +1674,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
+	engine->reset_hw = reset_common_ring;
 	engine->emit_flush = gen8_emit_flush;
 	engine->emit_request = gen8_emit_request;
 	engine->submit_request = execlists_submit_request;
@@ -1896,24 +1848,24 @@ make_rpcs(struct drm_i915_private *dev_priv)
 	 * must make an explicit request through RPCS for full
 	 * enablement.
 	 */
-	if (INTEL_INFO(dev_priv)->has_slice_pg) {
+	if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
 		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
+		rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
 			GEN8_RPCS_S_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
+	if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
 		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
+		rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
 			GEN8_RPCS_SS_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev_priv)->has_eu_pg) {
-		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+	if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+		rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
 			GEN8_RPCS_EU_MIN_SHIFT;
-		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+		rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
 			GEN8_RPCS_EU_MAX_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
@@ -2175,9 +2127,9 @@ error_deref_obj:
 	return ret;
 }
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-			    struct i915_gem_context *ctx)
+void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 {
+	struct i915_gem_context *ctx = dev_priv->kernel_context;
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv) {
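
Taken together, reset_common_ring() and gen8_init_common_ring() above implement recovery after a hang. In outline (a restatement of the code above, not new behaviour):

	/*
	 * GPU hang on an execlists engine:
	 *   1. reset_common_ring(engine, guilty_request)
	 *        - move RING_HEAD in the context image past the hanging
	 *          batch (request->postfix)
	 *        - drop the stale port[0] entry if the guilty context is
	 *          no longer what is queued there
	 *        - zero both port counts so the ports look unsubmitted
	 *   2. gen8_init_common_ring(engine)
	 *        - reprogram MOCS, HWS and interrupts
	 *        - if a port still holds requests, execlists_submit_ports()
	 *          replays the surviving work
	 */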

+ 1 - 4
drivers/gpu/drm/i915/intel_lrc.h

@@ -87,8 +87,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 
 struct drm_i915_private;
 
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-			    struct i915_gem_context *ctx);
+void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 				     struct intel_engine_cs *engine);
 
@@ -97,6 +96,4 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
 
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
-
 #endif /* _INTEL_LRC_H_ */

+ 32 - 20
drivers/gpu/drm/i915/intel_lvds.c

@@ -230,20 +230,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
 	I915_WRITE(PP_DIVISOR(0), val);
 }
 
-static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+				  struct intel_crtc_state *pipe_config,
+				  struct drm_connector_state *conn_state)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int pipe = crtc->pipe;
 	u32 temp;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (HAS_PCH_SPLIT(dev_priv)) {
 		assert_fdi_rx_pll_disabled(dev_priv, pipe);
 		assert_shared_dpll_disabled(dev_priv,
-					    crtc->config->shared_dpll);
+					    pipe_config->shared_dpll);
 	} else {
 		assert_pll_disabled(dev_priv, pipe);
 	}
@@ -253,7 +254,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 	temp = lvds_encoder->init_lvds_val;
 	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
-	if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~PORT_TRANS_SEL_MASK;
 		temp |= PORT_TRANS_SEL_CPT(pipe);
 	} else {
@@ -266,7 +267,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 
 	/* set the corresponding LVDS_BORDER bit */
 	temp &= ~LVDS_BORDER_ENABLE;
-	temp |= crtc->config->gmch_pfit.lvds_border_bits;
+	temp |= pipe_config->gmch_pfit.lvds_border_bits;
 	/* Set the B0-B3 data pairs corresponding to whether we're going to
 	 * set the DPLLs for dual-channel mode or not.
 	 */
@@ -289,7 +290,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 	if (IS_GEN4(dev_priv)) {
 		/* Bspec wording suggests that LVDS port dithering only exists
 		 * for 18bpp panels. */
-		if (crtc->config->dither && crtc->config->pipe_bpp == 18)
+		if (pipe_config->dither && pipe_config->pipe_bpp == 18)
 			temp |= LVDS_ENABLE_DITHER;
 		else
 			temp &= ~LVDS_ENABLE_DITHER;
@@ -306,7 +307,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 /**
  * Sets the power state for the panel.
  */
-static void intel_enable_lvds(struct intel_encoder *encoder)
+static void intel_enable_lvds(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -324,11 +327,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	intel_panel_enable_backlight(intel_connector);
 }
 
-static void intel_disable_lvds(struct intel_encoder *encoder)
+static void intel_disable_lvds(struct intel_encoder *encoder,
+			       struct intel_crtc_state *old_crtc_state,
+			       struct drm_connector_state *old_conn_state)
 {
-	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
 	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
@@ -338,7 +342,10 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	POSTING_READ(lvds_encoder->reg);
 }
 
-static void gmch_disable_lvds(struct intel_encoder *encoder)
+static void gmch_disable_lvds(struct intel_encoder *encoder,
+			      struct intel_crtc_state *old_crtc_state,
+			      struct drm_connector_state *old_conn_state)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
@@ -346,10 +353,12 @@ static void gmch_disable_lvds(struct intel_encoder *encoder)
 
 	intel_panel_disable_backlight(intel_connector);
 
-	intel_disable_lvds(encoder);
+	intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
-static void pch_disable_lvds(struct intel_encoder *encoder)
+static void pch_disable_lvds(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
@@ -358,9 +367,11 @@ static void pch_disable_lvds(struct intel_encoder *encoder)
 	intel_panel_disable_backlight(intel_connector);
 }
 
-static void pch_post_disable_lvds(struct intel_encoder *encoder)
+static void pch_post_disable_lvds(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
-	intel_disable_lvds(encoder);
+	intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
 }
 
 static enum drm_mode_status
@@ -382,7 +393,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
 }
 
 static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
-				      struct intel_crtc_state *pipe_config)
+				      struct intel_crtc_state *pipe_config,
+				      struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder =
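
All of the LVDS hooks in this file now share the same three-argument shape: the atomic crtc/connector state being committed is handed in explicitly instead of being dug out of encoder->base.crtc, which can be stale mid-commit. A sketch of the pattern for a hypothetical encoder (foo_pre_enable() and program_hw() are illustrative, not driver API):

	static void foo_pre_enable(struct intel_encoder *encoder,
				   struct intel_crtc_state *pipe_config,
				   struct drm_connector_state *conn_state)
	{
		/* Read mode/pll/dither decisions from the state that is
		 * being committed, never from encoder->base.crtc->config.
		 */
		const struct drm_display_mode *adjusted_mode =
			&pipe_config->base.adjusted_mode;

		program_hw(encoder, adjusted_mode);
	}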

+ 27 - 0
drivers/gpu/drm/i915/intel_opregion.c

@@ -1047,6 +1047,23 @@ err_out:
 	return err;
 }
 
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+	{
+		.callback = intel_use_opregion_panel_type_callback,
+		.ident = "Conrac GmbH IX45GM2",
+		.matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+			    DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+		},
+	},
+	{ }
+};
+
 int
 intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
@@ -1072,6 +1089,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 		return -ENODEV;
 	}
 
+	/*
+	 * So far we know that some machines must use it, others must not use it.
+	 * There doesn't seem to be any way to determine which way to go, except
+	 * via a quirk list :(
+	 */
+	if (!dmi_check_system(intel_use_opregion_panel_type)) {
+		DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+		return -ENODEV;
+	}
+
 	/*
 	 * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
 	 * low vswing for eDP, whereas the VBT panel type (2) gives us normal

+ 24 - 16
drivers/gpu/drm/i915/intel_panel.c

@@ -1430,10 +1430,11 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = lpt_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
 
 	return 0;
 }
@@ -1459,11 +1460,13 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = pch_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
 	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
 	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
-		(pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+		(pch_ctl1 & BLM_PCH_PWM_ENABLE);
 
 	return 0;
 }
@@ -1498,9 +1501,11 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = i9xx_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = panel->backlight.level != 0;
+	panel->backlight.enabled = val != 0;
 
 	return 0;
 }
@@ -1530,10 +1535,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = i9xx_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
 	return 0;
 }
@@ -1562,10 +1568,11 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
 	panel->backlight.min = get_backlight_min_vbt(connector);
 
 	val = _vlv_get_backlight(dev_priv, pipe);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
 
 	return 0;
 }
@@ -1607,10 +1614,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 		return -ENODEV;
 
 	val = bxt_get_backlight(connector);
-	panel->backlight.level = intel_panel_compute_brightness(connector, val);
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
 
-	panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
-		panel->backlight.level != 0;
+	panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
 
 	return 0;
 }
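
Each *_setup_backlight() above now funnels the raw register readout through clamp() against the VBT-derived [min, max] range before storing it, and derives 'enabled' from the PWM-enable bit alone rather than from a non-zero level. The clamp step in miniature (all values hypothetical):

	u32 raw = 37;				/* hardware readout */
	u32 min = 51, max = 65535;		/* VBT-derived bounds */
	u32 level = clamp(raw, min, max);	/* -> 51: never below min */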

+ 316 - 175
drivers/gpu/drm/i915/intel_pm.c

@@ -2853,13 +2853,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-/*
- * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
- * different active planes.
- */
-
-#define SKL_DDB_SIZE		896	/* in blocks */
-#define BXT_DDB_SIZE		512
+#define SKL_SAGV_BLOCK_TIME	30 /* µs */
 
 /*
  * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
@@ -2883,6 +2877,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
 	}
 }
 
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ *  - <= 1 pipe enabled
+ *  - All planes can enable watermarks for latencies >= SAGV engine block time
+ *  - We're not using an interlaced display configuration
+ */
+int
+skl_enable_sagv(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+	    dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+		return 0;
+
+	DRM_DEBUG_KMS("Enabling the SAGV\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				      GEN9_SAGV_ENABLE);
+
+	/* We don't need to wait for the SAGV when enabling */
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	/*
+	 * Some skl systems, pre-release machines in particular,
+	 * don't actually have an SAGV.
+	 */
+	if (ret == -ENXIO) {
+		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		return 0;
+	} else if (ret < 0) {
+		DRM_ERROR("Failed to enable the SAGV\n");
+		return ret;
+	}
+
+	dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+	return 0;
+}
+
+static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	uint32_t temp = GEN9_SAGV_DISABLE;
+
+	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				     &temp);
+	if (ret)
+		return ret;
+	else
+		return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
+skl_disable_sagv(struct drm_i915_private *dev_priv)
+{
+	int ret, result;
+
+	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+	    dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+		return 0;
+
+	DRM_DEBUG_KMS("Disabling the SAGV\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	/* bspec says to keep retrying for at least 1 ms */
+	ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	if (ret == -ETIMEDOUT) {
+		DRM_ERROR("Request to disable SAGV timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	/*
+	 * Some skl systems, pre-release machines in particular,
+	 * don't actually have an SAGV.
+	 */
+	if (result == -ENXIO) {
+		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+		return 0;
+	} else if (result < 0) {
+		DRM_ERROR("Failed to disable the SAGV\n");
+		return result;
+	}
+
+	dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+	return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+	int level, plane;
+
+	/*
+	 * SKL workaround: bspec recommends we disable the SAGV when we have
+	 * more than one pipe enabled.
+	 *
+	 * If there are no active CRTCs, no additional checks need be performed
+	 */
+	if (hweight32(intel_state->active_crtcs) == 0)
+		return true;
+	else if (hweight32(intel_state->active_crtcs) > 1)
+		return false;
+
+	/* Since we're now guaranteed to only have one active CRTC... */
+	pipe = ffs(intel_state->active_crtcs) - 1;
+	crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	for_each_plane(dev_priv, pipe, plane) {
+		/* Skip this plane if it's not enabled */
+		if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+			continue;
+
+		/* Find the highest enabled wm level for this plane */
+		for (level = ilk_wm_max_level(dev);
+		     intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+		     { }
+
+		/*
+		 * If any of the planes on this pipe don't enable wm levels
+		 * that incur memory latencies higher than 30µs, we can't enable
+		 * the SAGV.
+		 */
+		if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+			return false;
+	}
+
+	return true;
+}
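
The latency rule above in numbers: for each enabled plane, take its highest enabled watermark level and require that level's memory latency to be at least SKL_SAGV_BLOCK_TIME. A worked instance with an illustrative latency table and per-level watermark values:

	/* Hypothetical per-level latencies (µs) and plane wm values. */
	static const u16 latency[8]  = { 2, 4, 10, 19, 30, 36, 58, 96 };
	static const u16 plane_wm[8] = { 12, 10, 8, 6, 4, 0, 0, 0 };
	int level;

	for (level = 7; plane_wm[level] == 0; --level)
		;	/* highest enabled level here: 4 */

	/*
	 * latency[4] == 30 >= 30µs, so this plane permits the SAGV; had
	 * only levels 0-3 been enabled (19µs at most), it would not.
	 */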
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 				   const struct intel_crtc_state *cstate,
@@ -2909,10 +3050,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 	else
 		*num_active = hweight32(dev_priv->active_crtcs);
 
-	if (IS_BROXTON(dev))
-		ddb_size = BXT_DDB_SIZE;
-	else
-		ddb_size = SKL_DDB_SIZE;
+	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+	WARN_ON(ddb_size == 0);
 
 	ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
@@ -3688,183 +3827,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 		I915_WRITE(reg, 0);
 }
 
-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
-				const struct skl_wm_values *new)
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+			const struct skl_wm_values *wm,
+			int plane)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc;
-
-	for_each_intel_crtc(dev, crtc) {
-		int i, level, max_level = ilk_wm_max_level(dev);
-		enum pipe pipe = crtc->pipe;
-
-		if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
-			continue;
-		if (!crtc->active)
-			continue;
-
-		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
-
-		for (level = 0; level <= max_level; level++) {
-			for (i = 0; i < intel_num_planes(crtc); i++)
-				I915_WRITE(PLANE_WM(pipe, i, level),
-					   new->plane[pipe][i][level]);
-			I915_WRITE(CUR_WM(pipe, level),
-				   new->plane[pipe][PLANE_CURSOR][level]);
-		}
-		for (i = 0; i < intel_num_planes(crtc); i++)
-			I915_WRITE(PLANE_WM_TRANS(pipe, i),
-				   new->plane_trans[pipe][i]);
-		I915_WRITE(CUR_WM_TRANS(pipe),
-			   new->plane_trans[pipe][PLANE_CURSOR]);
-
-		for (i = 0; i < intel_num_planes(crtc); i++) {
-			skl_ddb_entry_write(dev_priv,
-					    PLANE_BUF_CFG(pipe, i),
-					    &new->ddb.plane[pipe][i]);
-			skl_ddb_entry_write(dev_priv,
-					    PLANE_NV12_BUF_CFG(pipe, i),
-					    &new->ddb.y_plane[pipe][i]);
-		}
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int level, max_level = ilk_wm_max_level(dev);
+	enum pipe pipe = intel_crtc->pipe;
 
-		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-				    &new->ddb.plane[pipe][PLANE_CURSOR]);
+	for (level = 0; level <= max_level; level++) {
+		I915_WRITE(PLANE_WM(pipe, plane, level),
+			   wm->plane[pipe][plane][level]);
 	}
-}
+	I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
 
-/*
- * When setting up a new DDB allocation arrangement, we need to correctly
- * sequence the times at which the new allocations for the pipes are taken into
- * account or we'll have pipes fetching from space previously allocated to
- * another pipe.
- *
- * Roughly the sequence looks like:
- *  1. re-allocate the pipe(s) with the allocation being reduced and not
- *     overlapping with a previous light-up pipe (another way to put it is:
- *     pipes with their new allocation strickly included into their old ones).
- *  2. re-allocate the other pipes that get their allocation reduced
- *  3. allocate the pipes having their allocation increased
- *
- * Steps 1. and 2. are here to take care of the following case:
- * - Initially DDB looks like this:
- *     |   B    |   C    |
- * - enable pipe A.
- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
- *   allocation
- *     |  A  |  B  |  C  |
- *
- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
- */
+	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
+			    &wm->ddb.plane[pipe][plane]);
+	skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
+			    &wm->ddb.y_plane[pipe][plane]);
+}
 
-static void
-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+			 const struct skl_wm_values *wm)
 {
-	int plane;
-
-	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int level, max_level = ilk_wm_max_level(dev);
+	enum pipe pipe = intel_crtc->pipe;
 
-	for_each_plane(dev_priv, pipe, plane) {
-		I915_WRITE(PLANE_SURF(pipe, plane),
-			   I915_READ(PLANE_SURF(pipe, plane)));
+	for (level = 0; level <= max_level; level++) {
+		I915_WRITE(CUR_WM(pipe, level),
+			   wm->plane[pipe][PLANE_CURSOR][level]);
 	}
-	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+	I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+
+	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+			    &wm->ddb.plane[pipe][PLANE_CURSOR]);
 }
 
-static bool
-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
-			    const struct skl_ddb_allocation *new,
-			    enum pipe pipe)
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+			       const struct skl_ddb_allocation *new,
+			       enum pipe pipe)
 {
-	uint16_t old_size, new_size;
-
-	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
-	new_size = skl_ddb_entry_size(&new->pipe[pipe]);
-
-	return old_size != new_size &&
-	       new->pipe[pipe].start >= old->pipe[pipe].start &&
-	       new->pipe[pipe].end <= old->pipe[pipe].end;
+	return new->pipe[pipe].start == old->pipe[pipe].start &&
+	       new->pipe[pipe].end == old->pipe[pipe].end;
 }
 
-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
-				struct skl_wm_values *new_values)
+static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+					   const struct skl_ddb_entry *b)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_allocation *cur_ddb, *new_ddb;
-	bool reallocated[I915_MAX_PIPES] = {};
-	struct intel_crtc *crtc;
-	enum pipe pipe;
-
-	new_ddb = &new_values->ddb;
-	cur_ddb = &dev_priv->wm.skl_hw.ddb;
-
-	/*
-	 * First pass: flush the pipes with the new allocation contained into
-	 * the old space.
-	 *
-	 * We'll wait for the vblank on those pipes to ensure we can safely
-	 * re-allocate the freed space without this pipe fetching from it.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		pipe = crtc->pipe;
-
-		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
-			continue;
-
-		skl_wm_flush_pipe(dev_priv, pipe, 1);
-		intel_wait_for_vblank(dev, pipe);
-
-		reallocated[pipe] = true;
-	}
-
+	return a->start < b->end && b->start < a->end;
+}
 
-	/*
-	 * Second pass: flush the pipes that are having their allocation
-	 * reduced, but overlapping with a previous allocation.
-	 *
-	 * Here as well we need to wait for the vblank to make sure the freed
-	 * space is not used anymore.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+				 const struct skl_ddb_allocation *old,
+				 const struct skl_ddb_allocation *new,
+				 enum pipe pipe)
+{
+	struct drm_device *dev = state->dev;
+	struct intel_crtc *intel_crtc;
+	enum pipe otherp;
 
-		pipe = crtc->pipe;
+	for_each_intel_crtc(dev, intel_crtc) {
+		otherp = intel_crtc->pipe;
 
-		if (reallocated[pipe])
+		if (otherp == pipe)
 			continue;
 
-		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
-		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
-			skl_wm_flush_pipe(dev_priv, pipe, 2);
-			intel_wait_for_vblank(dev, pipe);
-			reallocated[pipe] = true;
-		}
+		if (skl_ddb_entries_overlap(&new->pipe[pipe],
+					    &old->pipe[otherp]))
+			return true;
 	}
 
-	/*
-	 * Third pass: flush the pipes that got more space allocated.
-	 *
-	 * We don't need to actively wait for the update here, next vblank
-	 * will just get more DDB space with the correct WM values.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		pipe = crtc->pipe;
-
-		/*
-		 * At this point, only the pipes more space than before are
-		 * left to re-allocate.
-		 */
-		if (reallocated[pipe])
-			continue;
-
-		skl_wm_flush_pipe(dev_priv, pipe, 3);
-	}
+	return false;
 }
 
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
@@ -3964,11 +4002,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
 		ret = skl_allocate_pipe_ddb(cstate, ddb);
 		if (ret)
 			return ret;
+
+		ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
 
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+		     struct skl_wm_values *src,
+		     enum pipe pipe)
+{
+	dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+	memcpy(dst->plane[pipe], src->plane[pipe],
+	       sizeof(dst->plane[pipe]));
+	memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+	       sizeof(dst->plane_trans[pipe]));
+
+	dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+	       sizeof(dst->ddb.y_plane[pipe]));
+	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+	       sizeof(dst->ddb.plane[pipe]));
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -4041,8 +4101,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
+	struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+	enum pipe pipe = intel_crtc->pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
 		return;
@@ -4051,11 +4113,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
 
 	mutex_lock(&dev_priv->wm.wm_mutex);
 
-	skl_write_wm_values(dev_priv, results);
-	skl_flush_wm_values(dev_priv, results);
+	/*
+	 * If this pipe isn't active already, we're going to be enabling it
+	 * very soon. Since it's safe to update a pipe's ddb allocation while
+	 * the pipe's shut off, just do so here. Already active pipes will have
+	 * their watermarks updated once we update their planes.
+	 */
+	if (crtc->state->active_changed) {
+		int plane;
+
+		for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+			skl_write_plane_wm(intel_crtc, results, plane);
 
-	/* store the new configuration */
-	dev_priv->wm.skl_hw = *results;
+		skl_write_cursor_wm(intel_crtc, results);
+	}
+
+	skl_copy_wm_for_pipe(hw_vals, results, pipe);
 
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -5550,7 +5623,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 
 	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-	switch (INTEL_INFO(dev_priv)->eu_total) {
+	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
 	case 8:
 		/* (2 * 4) config */
 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -6691,9 +6764,7 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 
 	if (IS_IRONLAKE_M(dev_priv)) {
 		ironlake_enable_drps(dev_priv);
-		mutex_lock(&dev_priv->drm.struct_mutex);
 		intel_init_emon(dev_priv);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
@@ -7659,8 +7730,54 @@ void intel_init_pm(struct drm_device *dev)
 	}
 }
 
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+	uint32_t flags =
+		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+	switch (flags) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_UNIMPLEMENTED_CMD:
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	case GEN6_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	default:
+		MISSING_CASE(flags);
+		return 0;
+	}
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+	uint32_t flags =
+		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+	switch (flags) {
+	case GEN6_PCODE_SUCCESS:
+		return 0;
+	case GEN6_PCODE_ILLEGAL_CMD:
+		return -ENXIO;
+	case GEN7_PCODE_TIMEOUT:
+		return -ETIMEDOUT;
+	case GEN7_PCODE_ILLEGAL_DATA:
+		return -EINVAL;
+	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+		return -EOVERFLOW;
+	default:
+		MISSING_CASE(flags);
+		return 0;
+	}
+}
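
With these decoders in place, both pcode entry points below return a negative errno instead of silently succeeding when the mailbox flags an error. A minimal caller sketch (MBOX and val are placeholders), mirroring how skl_enable_sagv() earlier in this file treats -ENXIO as "feature absent":

	ret = sandybridge_pcode_write(dev_priv, MBOX, val);
	if (ret == -ENXIO)
		return 0;	/* command unimplemented: treat as absent */
	if (ret)
		return ret;	/* -EINVAL/-EOVERFLOW/-ETIMEDOUT propagate */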
+
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
 {
+	int status;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7687,12 +7804,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	*val = I915_READ_FW(GEN6_PCODE_DATA);
 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+	if (INTEL_GEN(dev_priv) > 6)
+		status = gen7_check_mailbox_status(dev_priv);
+	else
+		status = gen6_check_mailbox_status(dev_priv);
+
+	if (status) {
+		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+				 status);
+		return status;
+	}
+
 	return 0;
 }
 
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			       u32 mbox, u32 val)
+			    u32 mbox, u32 val)
 {
+	int status;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7717,6 +7847,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
+	if (INTEL_GEN(dev_priv) > 6)
+		status = gen7_check_mailbox_status(dev_priv);
+	else
+		status = gen6_check_mailbox_status(dev_priv);
+
+	if (status) {
+		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+				 status);
+		return status;
+	}
+
 	return 0;
 }
 

+ 7 - 7
drivers/gpu/drm/i915/intel_psr.c

@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	uint32_t max_sleep_time = 0x1f;
-	/* Lately it was identified that depending on panel idle frame count
-	 * calculated at HW can be off by 1. So let's use what came
-	 * from VBT + 1.
-	 * There are also other cases where panel demands at least 4
-	 * but VBT is not being set. To cover these 2 cases lets use
-	 * at least 5 when VBT isn't set to be on the safest side.
+	/*
+	 * Respect VBT in case it asks for a higher idle_frame value.
+	 * Use 6 as the minimum to cover all known cases, including the
+	 * off-by-one issue that HW has in some cases, and there are also
+	 * cases where the sink needs 5 or 6 idle patterns to train.
 	 */
-	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 	uint32_t val = EDP_PSR_ENABLE;
 
 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;

+ 48 - 30
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -559,10 +559,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		}
 	}
 
-	if (I915_NEED_GFX_HWS(dev_priv))
-		intel_ring_setup_status_page(engine);
-	else
+	if (HWS_NEEDS_PHYSICAL(dev_priv))
 		ring_setup_phys_status_page(engine);
+	else
+		intel_ring_setup_status_page(engine);
+
+	intel_engine_reset_irq(engine);
 
 	/* Enforce ordering by reading HEAD register back */
 	I915_READ_HEAD(engine);
@@ -577,34 +579,33 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	if (I915_READ_HEAD(engine))
 		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
 			  engine->name, I915_READ_HEAD(engine));
-	I915_WRITE_HEAD(engine, 0);
-	(void)I915_READ_HEAD(engine);
+
+	intel_ring_update_space(ring);
+	I915_WRITE_HEAD(engine, ring->head);
+	I915_WRITE_TAIL(engine, ring->tail);
+	(void)I915_READ_TAIL(engine);
 
 	I915_WRITE_CTL(engine,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
-		     I915_READ_START(engine) == i915_ggtt_offset(ring->vma) &&
-		     (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
+	if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
+				       RING_VALID, RING_VALID,
+				       50)) {
 		DRM_ERROR("%s initialization failed "
-			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08x]\n",
+			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
 			  engine->name,
 			  I915_READ_CTL(engine),
 			  I915_READ_CTL(engine) & RING_VALID,
-			  I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+			  I915_READ_HEAD(engine), ring->head,
+			  I915_READ_TAIL(engine), ring->tail,
 			  I915_READ_START(engine),
 			  i915_ggtt_offset(ring->vma));
 		ret = -EIO;
 		goto out;
 	}
 
-	ring->last_retired_head = -1;
-	ring->head = I915_READ_HEAD(engine);
-	ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-	intel_ring_update_space(ring);
-
 	intel_engine_init_hangcheck(engine);
 
 out:
@@ -613,6 +614,15 @@ out:
 	return ret;
 }
 
+static void reset_ring_common(struct intel_engine_cs *engine,
+			      struct drm_i915_gem_request *request)
+{
+	struct intel_ring *ring = request->ring;
+
+	ring->head = request->postfix;
+	ring->last_retired_head = -1;
+}
+
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
@@ -951,7 +961,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 		 * Only consider slices where one, and only one, subslice has 7
 		 * EUs
 		 */
-		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
+		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
 			continue;
 
 		/*
@@ -960,7 +970,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 		 *
 		 * ->    0 <= ss <= 3;
 		 */
-		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
 		vals[i] = 3 - ss;
 	}
 
@@ -2007,7 +2017,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 	}
 	ring->vma = vma;
 
-	list_add(&ring->link, &engine->buffers);
 	return ring;
 }
 
@@ -2015,7 +2024,6 @@ void
 intel_ring_free(struct intel_ring *ring)
 {
 	i915_vma_put(ring->vma);
-	list_del(&ring->link);
 	kfree(ring);
 }
 
@@ -2109,13 +2117,13 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 		goto error;
 	}
 
-	if (I915_NEED_GFX_HWS(dev_priv)) {
-		ret = init_status_page(engine);
+	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
+		WARN_ON(engine->id != RCS);
+		ret = init_phys_status_page(engine);
 		if (ret)
 			goto error;
 	} else {
-		WARN_ON(engine->id != RCS);
-		ret = init_phys_status_page(engine);
+		ret = init_status_page(engine);
 		if (ret)
 			goto error;
 	}
@@ -2155,11 +2163,11 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(dev_priv)) {
-		cleanup_status_page(engine);
-	} else {
+	if (HWS_NEEDS_PHYSICAL(dev_priv)) {
 		WARN_ON(engine->id != RCS);
 		cleanup_phys_status_page(engine);
+	} else {
+		cleanup_status_page(engine);
 	}
 
 	intel_engine_cleanup_common(engine);
@@ -2169,6 +2177,16 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 	engine->i915 = NULL;
 }
 
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv) {
+		engine->buffer->head = engine->buffer->tail;
+		engine->buffer->last_retired_head = -1;
+	}
+}
+
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	int ret;
@@ -2223,13 +2241,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
+	ret = i915_wait_request(target,
+				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+				NULL, NO_WAITBOOST);
 	if (ret)
 		return ret;
 
-	if (i915_reset_in_progress(&target->i915->gpu_error))
-		return -EAGAIN;
-
 	i915_gem_request_retire_upto(target);
 
 	intel_ring_update_space(ring);
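i915_wait_request() now takes a bitmask rather than an "interruptible" bool: I915_WAIT_LOCKED tells the wait that struct_mutex is held, so a pending GPU reset can be handled inside the wait itself, which is why the explicit i915_reset_in_progress() bail-out above could be dropped. A small self-contained sketch of the flags-over-bool pattern (names here are illustrative, not the kernel's):

#include <stdio.h>

#define WAIT_INTERRUPTIBLE	(1u << 0)
#define WAIT_LOCKED		(1u << 1)	/* caller holds the lock */

static int wait_request(unsigned int flags)
{
	if (flags & WAIT_LOCKED)
		printf("lock held: a reset may be handled inline\n");
	return 0;
}

int main(void)
{
	/* self-documenting at the call site, unlike wait_request(1, 0) */
	return wait_request(WAIT_INTERRUPTIBLE | WAIT_LOCKED);
}
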
@@ -2655,6 +2672,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	intel_ring_init_semaphores(dev_priv, engine);
 
 	engine->init_hw = init_ring_common;
+	engine->reset_hw = reset_ring_common;
 
 	engine->emit_request = i9xx_emit_request;
 	if (i915.semaphores)

+ 20 - 6
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -87,7 +87,6 @@ struct intel_ring {
 	void *vaddr;
 
 	struct intel_engine_cs *engine;
-	struct list_head link;
 
 	struct list_head request_list;
 
@@ -157,7 +156,6 @@ struct intel_engine_cs {
 	u32		mmio_base;
 	unsigned int irq_shift;
 	struct intel_ring *buffer;
-	struct list_head buffers;
 
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
@@ -211,6 +209,8 @@ struct intel_engine_cs {
 	void		(*irq_disable)(struct intel_engine_cs *engine);
 
 	int		(*init_hw)(struct intel_engine_cs *engine);
+	void		(*reset_hw)(struct intel_engine_cs *engine,
+				    struct drm_i915_gem_request *req);
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
@@ -226,7 +226,15 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
 	int		(*emit_request)(struct drm_i915_gem_request *req);
+
+	/* Pass the request to the hardware queue (e.g. directly into
+	 * the legacy ringbuffer or to the end of an execlist).
+	 *
+	 * This is called from an atomic context with irqs disabled; must
+	 * be irq safe.
+	 */
 	void		(*submit_request)(struct drm_i915_gem_request *req);
+
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
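Because submit_request() may now be invoked from an atomic context with interrupts already disabled, an implementation must use the irqsave spinlock variants. A hedged sketch of a conforming hook (illustrative; the real execlists path is more involved):

static void example_submit_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	unsigned long flags;

	/* irqsave: safe whether or not the caller already disabled irqs */
	spin_lock_irqsave(&engine->execlist_lock, flags);
	list_add_tail(&req->execlist_link, &engine->execlist_queue);
	spin_unlock_irqrestore(&engine->execlist_lock, flags);

	/* defer the actual ELSP write to the engine's irq tasklet */
	tasklet_hi_schedule(&engine->irq_tasklet);
}
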
@@ -298,11 +306,14 @@ struct intel_engine_cs {
 	/* Execlists */
 	struct tasklet_struct irq_tasklet;
 	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
+	struct execlist_port {
+		struct drm_i915_gem_request *request;
+		unsigned int count;
+	} execlist_port[2];
 	struct list_head execlist_queue;
 	unsigned int fw_domains;
-	unsigned int next_context_status_buffer;
-	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
+	bool preempt_wa;
 	u32 ctx_desc_template;
 
 	/**
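The two-element execlist_port[] mirrors the hardware's ELSP submission slots: .request is what was last written to that slot and .count how many times it has been submitted (a count above one marks a lite restore of the same context). A hedged sketch of how completion processing might drain the ports (illustrative only):

static void example_port_complete(struct intel_engine_cs *engine)
{
	struct execlist_port *port = engine->execlist_port;

	/* one element-completed event per submission of this slot */
	if (--port[0].count == 0) {
		i915_gem_request_put(port[0].request);
		port[0] = port[1];
		memset(&port[1], 0, sizeof(port[1]));
	}
}
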
@@ -441,6 +452,8 @@ void intel_ring_free(struct intel_ring *ring);
 void intel_engine_stop(struct intel_engine_cs *engine);
 void intel_engine_cleanup(struct intel_engine_cs *engine);
 
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
+
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
@@ -479,6 +492,7 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ring *ring);
 
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_reset_irq(struct intel_engine_cs *engine);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
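__intel_ring_space() above is the usual circular-buffer computation: head == tail reads as an empty ring, and some headroom is reserved so a completely full ring can never alias the empty state — which is also why intel_legacy_submission_resume() sets head = tail to mark the rings empty. A hedged version of the arithmetic (the exact headroom size is an assumption):

static int example_ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* bytes the CPU may still write */

	if (space <= 0)
		space += size;		/* unwrap around the ring */

	return space - 64;		/* assumed headroom so full != empty */
}
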
@@ -486,11 +500,11 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
 static inline int intel_engine_idle(struct intel_engine_cs *engine,
-				    bool interruptible)
+				    unsigned int flags)
 {
 	/* Wait upon the last request to be completed */
 	return i915_gem_active_wait_unlocked(&engine->last_request,
-					     interruptible, NULL, NULL);
+					     flags, NULL, NULL);
 }
 
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);

+ 29 - 28
drivers/gpu/drm/i915/intel_runtime_pm.c

@@ -287,6 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_device *dev = &dev_priv->drm;
 
 	/*
@@ -299,9 +300,9 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 	 * sure vgacon can keep working normally without triggering interrupts
 	 * and error messages.
 	 */
-	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
 	if (IS_BROADWELL(dev))
 		gen8_irq_power_well_post_enable(dev_priv,
@@ -318,7 +319,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	struct drm_device *dev = &dev_priv->drm;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	/*
 	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -331,9 +332,9 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
 	 * and error messages.
 	 */
 	if (power_well->data == SKL_DISP_PW_2) {
-		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
-		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
 		gen8_irq_power_well_post_enable(dev_priv,
 						1 << PIPE_C | 1 << PIPE_B);
@@ -2288,7 +2289,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-	struct device *device = &dev_priv->drm.pdev->dev;
+	struct device *kdev = &dev_priv->drm.pdev->dev;
 
 	/*
 	 * The i915.ko module is still not prepared to be loaded when
@@ -2310,7 +2311,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 	 * the platform doesn't support runtime PM.
 	 */
 	if (!HAS_RUNTIME_PM(dev_priv))
-		pm_runtime_put(device);
+		pm_runtime_put(kdev);
 }
 
 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2651,10 +2652,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
-	pm_runtime_get_sync(device);
+	pm_runtime_get_sync(kdev);
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 	assert_rpm_wakelock_held(dev_priv);
@@ -2672,11 +2673,11 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
 	if (IS_ENABLED(CONFIG_PM)) {
-		int ret = pm_runtime_get_if_in_use(device);
+		int ret = pm_runtime_get_if_in_use(kdev);
 
 		/*
 		 * In cases runtime PM is disabled by the RPM core and we get
@@ -2714,11 +2715,11 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
-	pm_runtime_get_noresume(device);
+	pm_runtime_get_noresume(kdev);
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 }
@@ -2733,15 +2734,15 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct device *kdev = &pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
 	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
 		atomic_inc(&dev_priv->pm.atomic_seq);
 
-	pm_runtime_mark_last_busy(device);
-	pm_runtime_put_autosuspend(device);
+	pm_runtime_mark_last_busy(kdev);
+	pm_runtime_put_autosuspend(kdev);
 }
 
 /**
@@ -2756,11 +2757,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_device *dev = &dev_priv->drm;
-	struct device *device = &dev->pdev->dev;
+	struct device *kdev = &pdev->dev;
 
-	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
-	pm_runtime_mark_last_busy(device);
+	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
+	pm_runtime_mark_last_busy(kdev);
 
 	/*
 	 * Take a permanent reference to disable the RPM functionality and drop
@@ -2769,10 +2771,10 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	 * platforms without RPM support.
 	 */
 	if (!HAS_RUNTIME_PM(dev)) {
-		pm_runtime_dont_use_autosuspend(device);
-		pm_runtime_get_sync(device);
+		pm_runtime_dont_use_autosuspend(kdev);
+		pm_runtime_get_sync(kdev);
 	} else {
-		pm_runtime_use_autosuspend(device);
+		pm_runtime_use_autosuspend(kdev);
 	}
 
 	/*
@@ -2780,6 +2782,5 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 	 * We drop that here and will reacquire it during unloading in
 	 * intel_power_domains_fini().
 	 */
-	pm_runtime_put_autosuspend(device);
+	pm_runtime_put_autosuspend(kdev);
 }
-
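The kdev/pdev renames make explicit that runtime PM operates on the PCI device's embedded struct device. The calling convention for the helpers is unchanged: a get/put bracket around any hardware access (hedged usage sketch):

	intel_runtime_pm_get(dev_priv);	/* resume device, take a wakeref */

	/* ... MMIO or other hardware access is safe in here ... */

	intel_runtime_pm_put(dev_priv);	/* drop wakeref, arm autosuspend */
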

+ 32 - 24
drivers/gpu/drm/i915/intel_sdvo.c

@@ -1003,24 +1003,22 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 }
 
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
-					 const struct drm_display_mode *adjusted_mode)
+					 struct intel_crtc_state *pipe_config)
 {
 	uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
-	struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	union hdmi_infoframe frame;
 	int ret;
 	ssize_t len;
 
 	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-						       adjusted_mode);
+						       &pipe_config->base.adjusted_mode);
 	if (ret < 0) {
 		DRM_ERROR("couldn't fill AVI infoframe\n");
 		return false;
 	}
 
 	if (intel_sdvo->rgb_quant_range_selectable) {
-		if (intel_crtc->config->limited_color_range)
+		if (pipe_config->limited_color_range)
 			frame.avi.quantization_range =
 				HDMI_QUANTIZATION_RANGE_LIMITED;
 		else
@@ -1125,7 +1123,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
 }
 
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
-				      struct intel_crtc_state *pipe_config)
+				      struct intel_crtc_state *pipe_config,
+				      struct drm_connector_state *conn_state)
 {
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -1192,22 +1191,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 	return true;
 }
 
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+				  struct intel_crtc_state *crtc_state,
+				  struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
-	struct drm_display_mode *mode = &crtc->config->base.mode;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+	struct drm_display_mode *mode = &crtc_state->base.mode;
 	struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
 	struct intel_sdvo_dtd input_dtd, output_dtd;
 	int rate;
 
-	if (!mode)
-		return;
-
 	/* First, set the input mapping for the first input to our controlled
 	 * output. This is only correct if we're a single-input device, in
 	 * which case the first input is the output from the appropriate SDVO
@@ -1240,11 +1238,11 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (crtc->config->has_hdmi_sink) {
+	if (crtc_state->has_hdmi_sink) {
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
-		intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+		intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
 	} else
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
@@ -1260,7 +1258,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 		DRM_INFO("Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
-	switch (crtc->config->pixel_multiplier) {
+	switch (crtc_state->pixel_multiplier) {
 	default:
 		WARN(1, "unknown pixel multiplier specified\n");
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1275,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 		/* The real mode polarity is set by the SDVO commands, using
 		 * struct intel_sdvo_dtd. */
 		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-		if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+		if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
 			sdvox |= HDMI_COLOR_RANGE_16_235;
 		if (INTEL_INFO(dev)->gen < 5)
 			sdvox |= SDVO_BORDER_ENABLE;
@@ -1301,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
 		/* done in crtc_mode_set as it lives inside the dpll register */
 	} else {
-		sdvox |= (crtc->config->pixel_multiplier - 1)
+		sdvox |= (crtc_state->pixel_multiplier - 1)
 			<< SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
@@ -1434,7 +1432,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 }
 
-static void intel_disable_sdvo(struct intel_encoder *encoder)
+static void intel_disable_sdvo(struct intel_encoder *encoder,
+			       struct intel_crtc_state *old_crtc_state,
+			       struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1477,16 +1477,22 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
 	}
 }
 
-static void pch_disable_sdvo(struct intel_encoder *encoder)
+static void pch_disable_sdvo(struct intel_encoder *encoder,
+			     struct intel_crtc_state *old_crtc_state,
+			     struct drm_connector_state *old_conn_state)
 {
 }
 
-static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+				  struct intel_crtc_state *old_crtc_state,
+				  struct drm_connector_state *old_conn_state)
 {
-	intel_disable_sdvo(encoder);
+	intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
 }
 
-static void intel_enable_sdvo(struct intel_encoder *encoder)
+static void intel_enable_sdvo(struct intel_encoder *encoder,
+			      struct intel_crtc_state *pipe_config,
+			      struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2930,10 +2936,12 @@ static bool
 intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
 			  struct drm_device *dev)
 {
+	struct pci_dev *pdev = dev->pdev;
+
 	sdvo->ddc.owner = THIS_MODULE;
 	sdvo->ddc.class = I2C_CLASS_DDC;
 	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
-	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.dev.parent = &pdev->dev;
 	sdvo->ddc.algo_data = sdvo;
 	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
 

+ 14 - 0
drivers/gpu/drm/i915/intel_sprite.c

@@ -203,6 +203,9 @@ skl_update_plane(struct drm_plane *drm_plane,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
+	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
 	u32 plane_ctl;
@@ -228,6 +231,9 @@ skl_update_plane(struct drm_plane *drm_plane,
 
 	plane_ctl |= skl_plane_ctl_rotation(rotation);
 
+	if (wm->dirty_pipes & drm_crtc_mask(crtc))
+		skl_write_plane_wm(intel_crtc, wm, plane);
+
 	if (key->flags) {
 		I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
 		I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -286,6 +292,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
 
+	/*
+	 * We only populate skl_results on watermark updates; if the
+	 * plane's visibility isn't actually changing, neither are its
+	 * watermarks.
+	 */
+	if (!dplane->state->visible)
+		skl_write_plane_wm(to_intel_crtc(crtc),
+				   &dev_priv->wm.skl_results, plane);
+
 	I915_WRITE(PLANE_CTL(pipe, plane), 0);
 
 	I915_WRITE(PLANE_SURF(pipe, plane), 0);

+ 11 - 4
drivers/gpu/drm/i915/intel_tv.c

@@ -838,7 +838,9 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 }
 
 static void
-intel_enable_tv(struct intel_encoder *encoder)
+intel_enable_tv(struct intel_encoder *encoder,
+		struct intel_crtc_state *pipe_config,
+		struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -851,7 +853,9 @@ intel_enable_tv(struct intel_encoder *encoder)
 }
 
 static void
-intel_disable_tv(struct intel_encoder *encoder)
+intel_disable_tv(struct intel_encoder *encoder,
+		 struct intel_crtc_state *old_crtc_state,
+		 struct drm_connector_state *old_conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -908,7 +912,8 @@ intel_tv_get_config(struct intel_encoder *encoder,
 
 static bool
 intel_tv_compute_config(struct intel_encoder *encoder,
-			struct intel_crtc_state *pipe_config)
+			struct intel_crtc_state *pipe_config,
+			struct drm_connector_state *conn_state)
 {
 	struct intel_tv *intel_tv = enc_to_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1010,7 +1015,9 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 		   color_conversion->av);
 }
 
-static void intel_tv_pre_enable(struct intel_encoder *encoder)
+static void intel_tv_pre_enable(struct intel_encoder *encoder,
+				struct intel_crtc_state *pipe_config,
+				struct drm_connector_state *conn_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);

+ 6 - 12
drivers/gpu/drm/i915/intel_uncore.c

@@ -1018,11 +1018,9 @@ gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
 __gen5_write(8)
 __gen5_write(16)
 __gen5_write(32)
-__gen5_write(64)
 __gen2_write(8)
 __gen2_write(16)
 __gen2_write(32)
-__gen2_write(64)
 
 #undef __gen5_write
 #undef __gen2_write
@@ -1112,23 +1110,18 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
 __gen9_write(8)
 __gen9_write(16)
 __gen9_write(32)
-__gen9_write(64)
 __chv_write(8)
 __chv_write(16)
 __chv_write(32)
-__chv_write(64)
 __gen8_write(8)
 __gen8_write(16)
 __gen8_write(32)
-__gen8_write(64)
 __hsw_write(8)
 __hsw_write(16)
 __hsw_write(32)
-__hsw_write(64)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
-__gen6_write(64)
 
 #undef __gen9_write
 #undef __chv_write
@@ -1158,7 +1151,6 @@ static void vgpu_write##x(struct drm_i915_private *dev_priv, \
 __vgpu_write(8)
 __vgpu_write(16)
 __vgpu_write(32)
-__vgpu_write(64)
 
 #undef __vgpu_write
 #undef VGPU_WRITE_FOOTER
@@ -1169,7 +1161,6 @@ do { \
 	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
 	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
 	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
-	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
 } while (0)
 
 #define ASSIGN_READ_MMIO_VFUNCS(x) \
@@ -1597,8 +1588,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 	if (engine_mask == ALL_ENGINES) {
 		hw_mask = GEN6_GRDOM_FULL;
 	} else {
+		unsigned int tmp;
+
 		hw_mask = 0;
-		for_each_engine_masked(engine, dev_priv, engine_mask)
+		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 			hw_mask |= hw_engine_mask[engine->id];
 	}
 
@@ -1714,15 +1707,16 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
 			      unsigned engine_mask)
 {
 	struct intel_engine_cs *engine;
+	unsigned int tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask)
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		if (gen8_request_engine_reset(engine))
 			goto not_ready;
 
 	return gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
-	for_each_engine_masked(engine, dev_priv, engine_mask)
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		gen8_unrequest_engine_reset(engine);
 
 	return -EIO;
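for_each_engine_masked() now takes a caller-supplied scratch variable that holds the not-yet-visited bits of the mask, instead of hiding that state inside the macro. The underlying pattern, sketched as standalone C (an illustrative macro, not the kernel's; handle_engine is a placeholder):

/* iterate the set bits of mask, lowest first; tmp holds what's left */
#define for_each_set_id(id, mask, tmp)				\
	for ((tmp) = (mask);					\
	     (tmp) ? ((id) = __builtin_ctz(tmp)), 1 : 0;	\
	     (tmp) &= (tmp) - 1)

	unsigned int id, tmp;

	for_each_set_id(id, 0x2a, tmp)	/* visits ids 1, 3, 5 */
		handle_engine(id);
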

+ 20 - 3
include/drm/drmP.h

@@ -168,6 +168,26 @@ void drm_printk(const char *level, unsigned int category,
 /** \name Macros to make printk easier */
 /*@{*/
 
+#define _DRM_PRINTK(once, level, fmt, ...)				\
+	do {								\
+		printk##once(KERN_##level "[" DRM_NAME "] " fmt,	\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#define DRM_INFO(fmt, ...)						\
+	_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...)						\
+	_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...)						\
+	_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...)						\
+	_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...)						\
+	_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...)						\
+	_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
 /**
  * Error output.
  *
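_DRM_PRINTK() pastes its first argument onto "printk", so the plain macros expand to printk() and the *_ONCE variants to printk_once(), each tagged with the "[drm]" prefix. Expected expansions (hedged):

	DRM_NOTE("falling back to %s\n", "fbdev");
	/* -> printk(KERN_NOTICE "[drm] falling back to %s\n", "fbdev"); */

	DRM_WARN_ONCE("clamping to %u bpc\n", 8);
	/* -> printk_once(KERN_WARNING "[drm] clamping to %u bpc\n", 8); */
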
@@ -202,8 +222,6 @@ void drm_printk(const char *level, unsigned int category,
 #define DRM_DEV_INFO(dev, fmt, ...)					\
 	drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt,	\
 		       ##__VA_ARGS__)
-#define DRM_INFO(fmt, ...)						\
-	drm_printk(KERN_INFO, DRM_UT_NONE, __func__, "", fmt, ##__VA_ARGS__)
 
 #define DRM_DEV_INFO_ONCE(dev, fmt, ...)				\
 ({									\
@@ -213,7 +231,6 @@ void drm_printk(const char *level, unsigned int category,
 		DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);			\
 	}								\
 })
-#define DRM_INFO_ONCE(fmt, ...) DRM_DEV_INFO_ONCE(NULL, fmt, ##__VA_ARGS__)
 
 /**
  * Debug output.

+ 17 - 5
include/drm/drm_dp_helper.h

@@ -211,14 +211,16 @@
 # define DP_DS_PORT_TYPE_DVI		    2
 # define DP_DS_PORT_TYPE_HDMI		    3
 # define DP_DS_PORT_TYPE_NON_EDID	    4
+# define DP_DS_PORT_TYPE_DP_DUALMODE        5
+# define DP_DS_PORT_TYPE_WIRELESS           6
 # define DP_DS_PORT_HPD			    (1 << 3)
 /* offset 1 for VGA is maximum megapixels per second / 8 */
 /* offset 2 */
-# define DP_DS_VGA_MAX_BPC_MASK		    (3 << 0)
-# define DP_DS_VGA_8BPC			    0
-# define DP_DS_VGA_10BPC		    1
-# define DP_DS_VGA_12BPC		    2
-# define DP_DS_VGA_16BPC		    3
+# define DP_DS_MAX_BPC_MASK	            (3 << 0)
+# define DP_DS_8BPC		            0
+# define DP_DS_10BPC		            1
+# define DP_DS_12BPC		            2
+# define DP_DS_16BPC		            3
 
 /* link configuration */
 #define	DP_LINK_BW_SET		            0x100
@@ -443,6 +445,9 @@
 #define DP_SOURCE_OUI			    0x300
 #define DP_SINK_OUI			    0x400
 #define DP_BRANCH_OUI			    0x500
+#define DP_BRANCH_ID                        0x503
+#define DP_BRANCH_HW_REV                    0x509
+#define DP_BRANCH_SW_REV                    0x50A
 
 #define DP_SET_POWER                        0x600
 # define DP_SET_POWER_D0                    0x1
@@ -813,6 +818,13 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				const u8 port_cap[4]);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			      const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			     const u8 port_cap[4], struct drm_dp_aux *aux);
 
 void drm_dp_aux_init(struct drm_dp_aux *aux);
 int drm_dp_aux_register(struct drm_dp_aux *aux);
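A hedged sketch of how a source driver might use the new downstream-port helpers when picking link parameters for a branch device such as a DP-to-VGA dongle; dpcd and port_cap are assumed to have been read over AUX already, and mode_clock, bpc and the aux pointer are assumed surrounding context (error handling elided):

	u8 dpcd[DP_RECEIVER_CAP_SIZE];	/* assumed: cached receiver caps */
	u8 port_cap[4];			/* assumed: cached downstream caps */
	char id[6];
	int max_clock, max_bpc;

	max_clock = drm_dp_downstream_max_clock(dpcd, port_cap);
	if (max_clock)		/* 0 means the branch reports no limit */
		mode_clock = min(mode_clock, max_clock);

	max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
	if (max_bpc)
		bpc = min(bpc, max_bpc);

	drm_dp_downstream_id(&aux, id);	/* 6-byte ID from DP_BRANCH_ID */
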

+ 12 - 24
include/drm/i915_pciids.h

@@ -134,7 +134,7 @@
 #define INTEL_IVB_Q_IDS(info) \
 	INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
 
-#define INTEL_HSW_D_IDS(info) \
+#define INTEL_HSW_IDS(info) \
 	INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
 	INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
 	INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
@@ -179,9 +179,7 @@
 	INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
 	INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
 	INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
-	INTEL_VGA_DEVICE(0x0D2E, info)  /* CRW GT3 reserved */ \
-
-#define INTEL_HSW_M_IDS(info) \
+	INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
 	INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
 	INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
 	INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
@@ -198,17 +196,15 @@
 	INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
 	INTEL_VGA_DEVICE(0x0D26, info)  /* CRW GT3 mobile */
 
-#define INTEL_VLV_M_IDS(info) \
+#define INTEL_VLV_IDS(info) \
 	INTEL_VGA_DEVICE(0x0f30, info), \
 	INTEL_VGA_DEVICE(0x0f31, info), \
 	INTEL_VGA_DEVICE(0x0f32, info), \
 	INTEL_VGA_DEVICE(0x0f33, info), \
-	INTEL_VGA_DEVICE(0x0157, info)
-
-#define INTEL_VLV_D_IDS(info) \
+	INTEL_VGA_DEVICE(0x0157, info), \
 	INTEL_VGA_DEVICE(0x0155, info)
 
-#define INTEL_BDW_GT12M_IDS(info)  \
+#define INTEL_BDW_GT12_IDS(info)  \
 	INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
 	INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
 	INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
@@ -216,21 +212,17 @@
 	INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
 	INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
 	INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
-	INTEL_VGA_DEVICE(0x161E, info)  /* GT2 ULX */
-
-#define INTEL_BDW_GT12D_IDS(info) \
+	INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
 	INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
 	INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
 	INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
 	INTEL_VGA_DEVICE(0x161D, info)  /* GT2 Workstation */
 
-#define INTEL_BDW_GT3M_IDS(info) \
+#define INTEL_BDW_GT3_IDS(info) \
 	INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
 	INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
 	INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
-	INTEL_VGA_DEVICE(0x162E, info)  /* ULX */
-
-#define INTEL_BDW_GT3D_IDS(info) \
+	INTEL_VGA_DEVICE(0x162E, info), /* ULX */ \
 	INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
 	INTEL_VGA_DEVICE(0x162D, info)  /* Workstation */
 
@@ -244,14 +236,10 @@
 	INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
 	INTEL_VGA_DEVICE(0x163D, info)  /* Workstation */
 
-#define INTEL_BDW_M_IDS(info) \
-	INTEL_BDW_GT12M_IDS(info), \
-	INTEL_BDW_GT3M_IDS(info), \
-	INTEL_BDW_RSVDM_IDS(info)
-
-#define INTEL_BDW_D_IDS(info) \
-	INTEL_BDW_GT12D_IDS(info), \
-	INTEL_BDW_GT3D_IDS(info), \
+#define INTEL_BDW_IDS(info) \
+	INTEL_BDW_GT12_IDS(info), \
+	INTEL_BDW_GT3_IDS(info), \
+	INTEL_BDW_RSVDM_IDS(info), \
+	INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \

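With the separate mobile/desktop lists folded together, a consumer now pulls in one merged list per platform when building its PCI match table. A hedged sketch (the *_info symbols are illustrative placeholders for per-platform device info):

static const struct pci_device_id example_pciidlist[] = {
	INTEL_HSW_IDS(&example_haswell_info),
	INTEL_VLV_IDS(&example_valleyview_info),
	INTEL_BDW_IDS(&example_broadwell_info),
	{ }	/* end-of-list terminator */
};
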
+ 1 - 0
include/uapi/drm/i915_drm.h

@@ -387,6 +387,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
 #define I915_PARAM_HAS_POOLED_EU	 38
 #define I915_PARAM_MIN_EU_IN_POOL	 39
+#define I915_PARAM_MMAP_GTT_VERSION	 40
 
 typedef struct drm_i915_getparam {
 	__s32 param;