
Merge tag 'drm-intel-next-2017-10-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

This time really the last i915 batch for v4.15:

- PSR state tracking in crtc state (Ville)
- Fix eviction when the GGTT is idle but full (Chris)
- BDW DP aux channel timeout fix (James)
- LSPCON detection fixes (Shashank)
- Use for_each_pipe to iterate over pipes (Mika Kahola)
- Replace *_reference/unreference() or *_ref/unref with _get/put() (Harsha)
- Refactoring and preparation for DDI encoder type cleanup (Ville)
- Broadwell DDI FDI buf translation fix (Chris)
- Read CSB and CSB write pointer from HWSP in GVT-g VM if available (Weinan)
- GuC/HuC firmware loader refactoring (Michal)
- Make shrinking more effective and not stall so much (Chris)
- Cannonlake PLL fixes (Rodrigo)
- DP MST connector error propagation fixes (James)
- Convert timers to use timer_setup (Kees Cook)
- Skylake plane enable/disable unification (Juha-Pekka)
- Fix to actually free driver internal objects when requested (Chris)
- DDI buf trans refactoring (Ville)
- Skip waking the device to service pwrite (Chris)
- Improve DSI VBT backlight parsing abstraction (Madhav)
- Cannonlake VBT DDC pin mapping fix (Rodrigo)

* tag 'drm-intel-next-2017-10-23' of git://anongit.freedesktop.org/drm/drm-intel: (87 commits)
  drm/i915: Update DRIVER_DATE to 20171023
  drm/i915/cnl: Map VBT DDC Pin to BSpec DDC Pin.
  drm/i915: Let's use more enum intel_dpll_id pll_id.
  drm/i915: Use existing DSI backlight ports info
  drm/i915: Parse DSI backlight/cabc ports.
  drm/i915: Skip waking the device to service pwrite
  drm/i915/crt: split compute_config hook by platforms
  drm/i915: remove g4x lowfreq_avail and has_pipe_cxsr
  drm/i915: Drop the redundant hdmi prefix/suffix from a lot of variables
  drm/i915: Unify error handling for missing DDI buf trans tables
  drm/i915: Centralize the SKL DDI A/E vs. B/C/D buf trans handling
  drm/i915: Kill off the BXT buf_trans default_index
  drm/i915: Pass encoder type to cnl_ddi_vswing_sequence() explicitly
  drm/i915: Integrate BXT into intel_ddi_dp_voltage_max()
  drm/i915: Pass the level to intel_prepare_hdmi_ddi_buffers()
  drm/i915: Pass the encoder type explicitly to skl_set_iboost()
  drm/i915: Extract intel_ddi_get_buf_trans_hdmi()
  drm/i915: Relocate intel_ddi_get_buf_trans_*() functions
  drm/i915: Flush the idle-worker for debugfs/i915_drop_caches
  drm/i915: adjust get_crtc_fence_y_offset() to use base.y instead of crtc.y
  ...
Dave Airlie, 7 years ago
Commit 36a5fdf76d
60 changed files, with 1945 insertions and 1387 deletions
  1. drivers/gpu/drm/drm_dp_dual_mode_helper.c (+13 -3)
  2. drivers/gpu/drm/i915/Makefile (+1 -1)
  3. drivers/gpu/drm/i915/i915_debugfs.c (+67 -108)
  4. drivers/gpu/drm/i915/i915_drv.h (+28 -13)
  5. drivers/gpu/drm/i915/i915_gem.c (+111 -44)
  6. drivers/gpu/drm/i915/i915_gem_clflush.c (+1 -0)
  7. drivers/gpu/drm/i915/i915_gem_evict.c (+7 -0)
  8. drivers/gpu/drm/i915/i915_gem_gtt.c (+1 -2)
  9. drivers/gpu/drm/i915/i915_gem_object.h (+8 -2)
  10. drivers/gpu/drm/i915/i915_gem_render_state.c (+1 -1)
  11. drivers/gpu/drm/i915/i915_gem_shrinker.c (+57 -62)
  12. drivers/gpu/drm/i915/i915_gem_stolen.c (+4 -1)
  13. drivers/gpu/drm/i915/i915_gem_tiling.c (+1 -1)
  14. drivers/gpu/drm/i915/i915_gem_userptr.c (+9 -7)
  15. drivers/gpu/drm/i915/i915_pci.c (+1 -3)
  16. drivers/gpu/drm/i915/i915_pvinfo.h (+1 -0)
  17. drivers/gpu/drm/i915/i915_reg.h (+1 -1)
  18. drivers/gpu/drm/i915/i915_sw_fence.c (+13 -5)
  19. drivers/gpu/drm/i915/i915_vgpu.h (+6 -0)
  20. drivers/gpu/drm/i915/i915_vma.c (+12 -4)
  21. drivers/gpu/drm/i915/intel_bios.c (+62 -20)
  22. drivers/gpu/drm/i915/intel_breadcrumbs.c (+8 -10)
  23. drivers/gpu/drm/i915/intel_crt.c (+21 -5)
  24. drivers/gpu/drm/i915/intel_csr.c (+16 -15)
  25. drivers/gpu/drm/i915/intel_ddi.c (+464 -405)
  26. drivers/gpu/drm/i915/intel_display.c (+79 -127)
  27. drivers/gpu/drm/i915/intel_dp.c (+5 -5)
  28. drivers/gpu/drm/i915/intel_dp_mst.c (+30 -9)
  29. drivers/gpu/drm/i915/intel_drv.h (+11 -9)
  30. drivers/gpu/drm/i915/intel_dsi.c (+4 -33)
  31. drivers/gpu/drm/i915/intel_engine_cs.c (+24 -20)
  32. drivers/gpu/drm/i915/intel_fbc.c (+8 -5)
  33. drivers/gpu/drm/i915/intel_fbdev.c (+2 -2)
  34. drivers/gpu/drm/i915/intel_guc.c (+104 -0)
  35. drivers/gpu/drm/i915/intel_guc.h (+13 -3)
  36. drivers/gpu/drm/i915/intel_guc_fw.c (+42 -208)
  37. drivers/gpu/drm/i915/intel_guc_fw.h (+33 -0)
  38. drivers/gpu/drm/i915/intel_guc_fwif.h (+2 -2)
  39. drivers/gpu/drm/i915/intel_hdmi.c (+16 -10)
  40. drivers/gpu/drm/i915/intel_huc.c (+41 -84)
  41. drivers/gpu/drm/i915/intel_lrc.c (+2 -1)
  42. drivers/gpu/drm/i915/intel_lspcon.c (+15 -7)
  43. drivers/gpu/drm/i915/intel_pm.c (+9 -2)
  44. drivers/gpu/drm/i915/intel_psr.c (+47 -72)
  45. drivers/gpu/drm/i915/intel_ringbuffer.c (+10 -6)
  46. drivers/gpu/drm/i915/intel_sprite.c (+2 -2)
  47. drivers/gpu/drm/i915/intel_uc.c (+14 -6)
  48. drivers/gpu/drm/i915/intel_uc_fw.c (+162 -37)
  49. drivers/gpu/drm/i915/intel_uc_fw.h (+14 -0)
  50. drivers/gpu/drm/i915/intel_uncore.c (+7 -6)
  51. drivers/gpu/drm/i915/intel_vbt_defs.h (+8 -0)
  52. drivers/gpu/drm/i915/selftests/huge_pages.c (+2 -2)
  53. drivers/gpu/drm/i915/selftests/i915_gem_context.c (+1 -1)
  54. drivers/gpu/drm/i915/selftests/i915_gem_evict.c (+157 -5)
  55. drivers/gpu/drm/i915/selftests/i915_live_selftests.h (+1 -0)
  56. drivers/gpu/drm/i915/selftests/i915_sw_fence.c (+42 -0)
  57. drivers/gpu/drm/i915/selftests/lib_sw_fence.c (+78 -0)
  58. drivers/gpu/drm/i915/selftests/lib_sw_fence.h (+42 -0)
  59. drivers/gpu/drm/i915/selftests/mock_context.c (+1 -5)
  60. drivers/gpu/drm/i915/selftests/mock_engine.c (+3 -5)

+ 13 - 3
drivers/gpu/drm/drm_dp_dual_mode_helper.c

@@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
 {
 	u8 data;
 	int ret = 0;
+	int retry;
 
 	if (!mode) {
 		DRM_ERROR("NULL input\n");
@@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
 	}
 
 	/* Read Status: i2c over aux */
-	ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
-				    &data, sizeof(data));
+	for (retry = 0; retry < 6; retry++) {
+		if (retry)
+			usleep_range(500, 1000);
+
+		ret = drm_dp_dual_mode_read(adapter,
+					    DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+					    &data, sizeof(data));
+		if (!ret)
+			break;
+	}
+
 	if (ret < 0) {
-		DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+		DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n");
 		return -EFAULT;
 	}
 

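The retry loop added above is a standard bounded-retry pattern: i2c-over-aux reads from an LSPCON adaptor can fail transiently while the chip is still booting, so the read is attempted up to six times with a short sleep before each retry. A self-contained sketch of the same idea — the wrapper name is illustrative, the DRM calls are the ones used in the hunk:

    /* Retry a transiently-failing read a bounded number of times.
     * The first attempt is immediate; usleep_range() before each
     * retry gives the adaptor time to come up and lets the
     * scheduler coalesce the timer.
     */
    static int lspcon_read_mode_with_retry(struct i2c_adapter *adapter, u8 *data)
    {
    	int ret = -EIO;
    	int retry;

    	for (retry = 0; retry < 6; retry++) {
    		if (retry)
    			usleep_range(500, 1000);

    		ret = drm_dp_dual_mode_read(adapter,
    					    DP_DUAL_MODE_LSPCON_CURRENT_MODE,
    					    data, sizeof(*data));
    		if (!ret)
    			break;
    	}

    	return ret;
    }

Note the companion change from DRM_ERROR to DRM_DEBUG_KMS: once a failure is expected and retried, it no longer warrants an error-level log message.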
+ 1 - 1
drivers/gpu/drm/i915/Makefile

@@ -64,7 +64,7 @@ i915-y += intel_uc.o \
 	  intel_guc.o \
 	  intel_guc_ct.o \
 	  intel_guc_log.o \
-	  intel_guc_loader.o \
+	  intel_guc_fw.o \
 	  intel_huc.o \
 	  i915_guc_submission.o
 

+ 67 - 108
drivers/gpu/drm/i915/i915_debugfs.c

@@ -83,7 +83,7 @@ static char get_active_flag(struct drm_i915_gem_object *obj)
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->pin_display ? 'p' : ' ';
+	return obj->pin_global ? 'p' : ' ';
 }
 
 static char get_tiling_flag(struct drm_i915_gem_object *obj)
@@ -180,8 +180,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			pin_count++;
 	}
 	seq_printf(m, " (pinned x %d)", pin_count);
-	if (obj->pin_display)
-		seq_printf(m, " (display)");
+	if (obj->pin_global)
+		seq_printf(m, " (global)");
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
@@ -271,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		goto out;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		if (count == total)
 			break;
 
@@ -283,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 
 	}
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		if (count == total)
 			break;
 
@@ -293,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		objects[count++] = obj;
 		total_obj_size += obj->base.size;
 	}
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
 
@@ -454,7 +457,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	mapped_size = mapped_count = 0;
 	purgeable_size = purgeable_count = 0;
 	huge_size = huge_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		size += obj->base.size;
 		++count;
 
@@ -477,11 +482,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = dpy_size = dpy_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		size += obj->base.size;
 		++count;
 
-		if (obj->pin_display) {
+		if (obj->pin_global) {
 			dpy_size += obj->base.size;
 			++dpy_count;
 		}
@@ -502,6 +507,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			page_sizes |= obj->mm.page_sizes.sg;
 		}
 	}
+	spin_unlock(&dev_priv->mm.obj_lock);
+
 	seq_printf(m, "%u bound objects, %llu bytes\n",
 		   count, size);
 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
@@ -512,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		   huge_count,
 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
 		   huge_size);
-	seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
 		   dpy_count, dpy_size);
 
 	seq_printf(m, "%llu [%llu] gtt total\n",
@@ -568,32 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_i915_private *dev_priv = node_to_i915(node);
 	struct drm_device *dev = &dev_priv->drm;
-	bool show_pin_display_only = !!node->info_ent->data;
+	struct drm_i915_gem_object **objects;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
+	unsigned long nobject, n;
 	int count, ret;
 
+	nobject = READ_ONCE(dev_priv->mm.object_count);
+	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
+	if (!objects)
+		return -ENOMEM;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (show_pin_display_only && !obj->pin_display)
-			continue;
+	count = 0;
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+		objects[count++] = obj;
+		if (count == nobject)
+			break;
+	}
+	spin_unlock(&dev_priv->mm.obj_lock);
+
+	total_obj_size = total_gtt_size = 0;
+	for (n = 0;  n < count; n++) {
+		obj = objects[n];
 
 		seq_puts(m, "   ");
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
+	kvfree(objects);
 
 	return 0;
 }
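The rewrite above is a snapshot-then-process pattern: the bound list is copied into a kvmalloc'd array under the new obj_lock spinlock, and the slow describe_obj() pass then runs without that lock held (struct_mutex, still taken, is what keeps the snapshotted objects alive). A hedged sketch of just the capture step, with an illustrative helper name:

    /* Capture a snapshot of a spinlock-protected list so the slow
     * printing pass can run without the lock. The size read with
     * READ_ONCE() is only a hint; the copy loop bounds itself.
     */
    static int snapshot_bound_list(struct drm_i915_private *i915,
    			       struct drm_i915_gem_object **objects,
    			       unsigned long nobject)
    {
    	struct drm_i915_gem_object *obj;
    	int count = 0;

    	spin_lock(&i915->mm.obj_lock);
    	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
    		objects[count++] = obj;
    		if (count == nobject)
    			break;	/* hint was stale; stop at capacity */
    	}
    	spin_unlock(&i915->mm.obj_lock);

    	return count;
    }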
@@ -643,54 +664,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void print_request(struct seq_file *m,
-			  struct drm_i915_gem_request *rq,
-			  const char *prefix)
-{
-	seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
-		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
-		   rq->priotree.priority,
-		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
-		   rq->timeline->common->name);
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_request *req;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int ret, any;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	any = 0;
-	for_each_engine(engine, dev_priv, id) {
-		int count;
-
-		count = 0;
-		list_for_each_entry(req, &engine->timeline->requests, link)
-			count++;
-		if (count == 0)
-			continue;
-
-		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->timeline->requests, link)
-			print_request(m, req, "    ");
-
-		any++;
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	if (any == 0)
-		seq_puts(m, "No requests\n");
-
-	return 0;
-}
-
 static void i915_ring_seqno_info(struct seq_file *m,
 				 struct intel_engine_cs *engine)
 {
@@ -2386,27 +2359,13 @@ static int i915_llc(struct seq_file *m, void *data)
 static int i915_huc_load_status_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+	struct drm_printer p;
 
 	if (!HAS_HUC_UCODE(dev_priv))
 		return 0;
 
-	seq_puts(m, "HuC firmware status:\n");
-	seq_printf(m, "\tpath: %s\n", huc_fw->path);
-	seq_printf(m, "\tfetch: %s\n",
-		intel_uc_fw_status_repr(huc_fw->fetch_status));
-	seq_printf(m, "\tload: %s\n",
-		intel_uc_fw_status_repr(huc_fw->load_status));
-	seq_printf(m, "\tversion wanted: %d.%d\n",
-		huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
-	seq_printf(m, "\tversion found: %d.%d\n",
-		huc_fw->major_ver_found, huc_fw->minor_ver_found);
-	seq_printf(m, "\theader: offset is %d; size = %d\n",
-		huc_fw->header_offset, huc_fw->header_size);
-	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
-		huc_fw->ucode_offset, huc_fw->ucode_size);
-	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
-		huc_fw->rsa_offset, huc_fw->rsa_size);
+	p = drm_seq_file_printer(m);
+	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
 
 	intel_runtime_pm_get(dev_priv);
 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -2418,29 +2377,14 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+	struct drm_printer p;
 	u32 tmp, i;
 
 	if (!HAS_GUC_UCODE(dev_priv))
 		return 0;
 
-	seq_printf(m, "GuC firmware status:\n");
-	seq_printf(m, "\tpath: %s\n",
-		guc_fw->path);
-	seq_printf(m, "\tfetch: %s\n",
-		intel_uc_fw_status_repr(guc_fw->fetch_status));
-	seq_printf(m, "\tload: %s\n",
-		intel_uc_fw_status_repr(guc_fw->load_status));
-	seq_printf(m, "\tversion wanted: %d.%d\n",
-		guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
-	seq_printf(m, "\tversion found: %d.%d\n",
-		guc_fw->major_ver_found, guc_fw->minor_ver_found);
-	seq_printf(m, "\theader: offset is %d; size = %d\n",
-		guc_fw->header_offset, guc_fw->header_size);
-	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
-		guc_fw->ucode_offset, guc_fw->ucode_size);
-	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
-		guc_fw->rsa_offset, guc_fw->rsa_size);
+	p = drm_seq_file_printer(m);
+	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
 
 	intel_runtime_pm_get(dev_priv);
 
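Both hunks above replace two near-identical hand-rolled seq_printf() dumps with a shared intel_uc_fw_dump() that writes through a struct drm_printer, so one dump routine can target debugfs, dmesg, or any other sink. A minimal sketch of the drm_printer idiom — the dump body and field choice here are illustrative, while drm_printf() and drm_seq_file_printer() are existing DRM helpers:

    #include <linux/seq_file.h>
    #include <drm/drm_print.h>

    /* One dump routine, many sinks: the printer abstracts the output. */
    static void uc_fw_dump_sketch(const struct intel_uc_fw *fw,
    			      struct drm_printer *p)
    {
    	drm_printf(p, "firmware: %s\n", fw->path);
    	drm_printf(p, "version wanted: %d.%d\n",
    		   fw->major_ver_wanted, fw->minor_ver_wanted);
    }

    static int debugfs_show_sketch(struct seq_file *m, struct intel_uc_fw *fw)
    {
    	struct drm_printer p = drm_seq_file_printer(m); /* debugfs sink */

    	uc_fw_dump_sketch(fw, &p);
    	return 0;
    }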
@@ -3310,6 +3254,16 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *i915 = node_to_i915(m->private);
+
+	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
+
+	return 0;
+}
+
 static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4225,18 +4179,20 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
 			i915_ring_test_irq_get, i915_ring_test_irq_set,
 			"0x%08llx\n");
 
-#define DROP_UNBOUND 0x1
-#define DROP_BOUND 0x2
-#define DROP_RETIRE 0x4
-#define DROP_ACTIVE 0x8
-#define DROP_FREED 0x10
-#define DROP_SHRINK_ALL 0x20
+#define DROP_UNBOUND	BIT(0)
+#define DROP_BOUND	BIT(1)
+#define DROP_RETIRE	BIT(2)
+#define DROP_ACTIVE	BIT(3)
+#define DROP_FREED	BIT(4)
+#define DROP_SHRINK_ALL	BIT(5)
+#define DROP_IDLE	BIT(6)
 #define DROP_ALL (DROP_UNBOUND	| \
 		  DROP_BOUND	| \
 		  DROP_RETIRE	| \
 		  DROP_ACTIVE	| \
 		  DROP_FREED	| \
-		  DROP_SHRINK_ALL)
+		  DROP_SHRINK_ALL |\
+		  DROP_IDLE)
 static int
 i915_drop_caches_get(void *data, u64 *val)
 {
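Switching the DROP_* definitions from hand-written hex to BIT(n) makes gaps and collisions impossible: BIT(n) expands to (1UL << (n)), so each flag occupies exactly one bit and a new entry such as DROP_IDLE simply takes the next index. Testing and composing remain plain mask arithmetic, roughly:

    #include <linux/bitops.h>	/* BIT() */

    #define DROP_UNBOUND	BIT(0)	/* == 0x1 */
    #define DROP_IDLE	BIT(6)	/* == 0x40, appended without renumbering */

    /* Flags compose with | and test with &. */
    static bool wants_idle(u64 val)
    {
    	return val & DROP_IDLE;
    }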
@@ -4252,7 +4208,8 @@ i915_drop_caches_set(void *data, u64 val)
 	struct drm_device *dev = &dev_priv->drm;
 	int ret = 0;
 
-	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
+	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+		  val, val & DROP_ALL);
 
 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
 	 * on ioctls on -EAGAIN. */
@@ -4283,6 +4240,9 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_gem_shrink_all(dev_priv);
 	fs_reclaim_release(GFP_KERNEL);
 
+	if (val & DROP_IDLE)
+		drain_delayed_work(&dev_priv->gt.idle_work);
+
 	if (val & DROP_FREED) {
 		synchronize_rcu();
 		i915_gem_drain_freed_objects(dev_priv);
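DROP_IDLE lets debugfs users wait for the GT idle worker to actually complete, which is the "Flush the idle-worker" fix from the commit list above. Draining matters because the idle worker can re-arm itself: a single flush_delayed_work() only waits for one execution. A sketch of what draining a self-rearming delayed work means — the helper body here is an assumption for illustration, not a quote of the i915 one:

    #include <linux/workqueue.h>

    /* Keep flushing until the work stops re-queuing itself.
     * flush_delayed_work() returns true while there was work to wait
     * for; delayed_work_pending() catches a re-arm that raced us.
     */
    static void drain_delayed_work_sketch(struct delayed_work *dw)
    {
    	do {
    		while (flush_delayed_work(dw))
    			;
    	} while (delayed_work_pending(dw));
    }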
@@ -4751,9 +4711,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
-	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
 	{"i915_gem_stolen", i915_gem_stolen_list_info },
-	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
@@ -4791,6 +4749,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dmc_info", i915_dmc_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_engine_info", i915_engine_info, 0},
+	{"i915_shrinker_info", i915_shrinker_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},

+ 28 - 13
drivers/gpu/drm/i915/i915_drv.h

@@ -80,8 +80,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20171012"
-#define DRIVER_TIMESTAMP	1507831511
+#define DRIVER_DATE		"20171023"
+#define DRIVER_TIMESTAMP	1508748913
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -785,7 +785,6 @@ struct intel_csr {
 	func(has_logical_ring_contexts); \
 	func(has_logical_ring_preemption); \
 	func(has_overlay); \
-	func(has_pipe_cxsr); \
 	func(has_pooled_eu); \
 	func(has_psr); \
 	func(has_rc6); \
@@ -1108,6 +1107,16 @@ struct intel_fbc {
 			int src_w;
 			int src_h;
 			bool visible;
+			/*
+			 * Display surface base address adjustement for
+			 * pageflips. Note that on gen4+ this only adjusts up
+			 * to a tile, offsets within a tile are handled in
+			 * the hw itself (with the TILEOFF register).
+			 */
+			int adjusted_x;
+			int adjusted_y;
+
+			int y;
 		} plane;
 
 		struct {
@@ -1490,6 +1499,9 @@ struct i915_gem_mm {
 	 * always the inner lock when overlapping with struct_mutex. */
 	struct mutex stolen_lock;
 
+	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
+	spinlock_t obj_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head bound_list;
@@ -1510,6 +1522,7 @@ struct i915_gem_mm {
 	 */
 	struct llist_head free_list;
 	struct work_struct free_work;
+	spinlock_t free_lock;
 
 	/**
 	 * Small stash of WC pages
@@ -1765,6 +1778,8 @@ struct intel_vbt_data {
 		u16 panel_id;
 		struct mipi_config *config;
 		struct mipi_pps_data *pps;
+		u16 bl_ports;
+		u16 cabc_ports;
 		u8 seq_version;
 		u32 size;
 		u8 *data;
@@ -1960,13 +1975,7 @@ struct i915_wa_reg {
 	u32 mask;
 };
 
-/*
- * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
- * allowing it for RCS as we don't foresee any requirement of having
- * a whitelist for other engines. When it is really required for
- * other engines then the limit need to be increased.
- */
-#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
+#define I915_MAX_WA_REGS 16
 
 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
@@ -3077,6 +3086,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 
 #define CNL_REVID_A0		0x0
 #define CNL_REVID_B0		0x1
+#define CNL_REVID_C0		0x2
 
 #define IS_CNL_REVID(p, since, until) \
 	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
@@ -3168,7 +3178,6 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
 
 #define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
-#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
 #define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
 #define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
 
@@ -3565,10 +3574,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	return __i915_gem_object_get_pages(obj);
 }
 
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
 static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	atomic_inc(&obj->mm.pages_pin_count);
 }
@@ -3582,8 +3597,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	GEM_BUG_ON(!obj->mm.pages);
 
 	atomic_dec(&obj->mm.pages_pin_count);
 }

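The new i915_gem_object_has_pages() introduced above folds two details into one predicate, and most of the i915_gem.c churn that follows is converting raw pointer tests to it: obj->mm.pages may legitimately hold an ERR_PTR while a get_pages attempt is in flight, and lockless readers want READ_ONCE() so the compiler emits a single coherent load. Roughly:

    /* Centralise the "pointer may be NULL or an ERR_PTR" test and the
     * lockless READ_ONCE() load in one place.
     */
    static inline bool
    i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
    {
    	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
    }

    /* before: GEM_BUG_ON(!obj->mm.pages);  -- misses the ERR_PTR case */
    /* after:  GEM_BUG_ON(!i915_gem_object_has_pages(obj)); */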
+ 111 - 44
drivers/gpu/drm/i915/i915_gem.c

@@ -56,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
 		return true;
 
-	return obj->pin_display;
+	return obj->pin_global; /* currently in use by HW, keep flushed */
 }
 
 static int
@@ -1240,7 +1240,23 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	intel_runtime_pm_get(i915);
+	if (i915_gem_object_has_struct_page(obj)) {
+		/*
+		 * Avoid waking the device up if we can fallback, as
+		 * waking/resuming is very slow (worst-case 10-100 ms
+		 * depending on PCI sleeps and our own resume time).
+		 * This easily dwarfs any performance advantage from
+		 * using the cache bypass of indirect GGTT access.
+		 */
+		if (!intel_runtime_pm_get_if_in_use(i915)) {
+			ret = -EFAULT;
+			goto out_unlock;
+		}
+	} else {
+		/* No backing pages, no fallback, we must force GGTT access */
+		intel_runtime_pm_get(i915);
+	}
+
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
 				       PIN_NONFAULT |
@@ -1257,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
 		if (ret)
-			goto out_unlock;
+			goto out_rpm;
 		GEM_BUG_ON(!node.allocated);
 	}
 
@@ -1320,8 +1336,9 @@ out_unpin:
 	} else {
 		i915_vma_unpin(vma);
 	}
-out_unlock:
+out_rpm:
 	intel_runtime_pm_put(i915);
+out_unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
 	return ret;
 }
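These three hunks implement the "Skip waking the device to service pwrite" item from the commit list: intel_runtime_pm_get_if_in_use() takes a runtime-PM reference only if the device is already awake, and a failed grab returns -EFAULT so shmem-backed objects fall back to the CPU write path instead of paying a 10-100 ms resume. A sketch of the decision in isolation — the driver functions are the ones used above, the wrapper is illustrative:

    /* Avoid a costly runtime resume when a software fallback exists.
     * Returns true if GGTT access may proceed, false to let the
     * caller take the CPU path instead.
     */
    static bool acquire_rpm_for_pwrite(struct drm_i915_private *i915,
    				   struct drm_i915_gem_object *obj)
    {
    	if (i915_gem_object_has_struct_page(obj))
    		/* shmem-backed: only proceed if already awake */
    		return intel_runtime_pm_get_if_in_use(i915);

    	/* no backing pages to fall back on: force a wakeup */
    	intel_runtime_pm_get(i915);
    	return true;
    }

The relabelled out_rpm/out_unlock exits exist because the error path from a failed conditional grab must skip intel_runtime_pm_put(), which would otherwise drop a reference that was never taken.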
@@ -1537,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct list_head *list;
 	struct i915_vma *vma;
 
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!i915_vma_is_ggtt(vma))
 			break;
@@ -1551,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 
 	i915 = to_i915(obj->base.dev);
+	spin_lock(&i915->mm.obj_lock);
 	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-	list_move_tail(&obj->global_link, list);
+	list_move_tail(&obj->mm.link, list);
+	spin_unlock(&i915->mm.obj_lock);
 }
 
 /**
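This hunk is the template for the whole series of mm.link conversions in this pull: every move between mm.bound_list and mm.unbound_list now happens inside the new obj_lock spinlock instead of relying on struct_mutex, which is what lets the shrinker walk these lists cheaply. In isolation the pattern is just:

    /* Choose the destination list, then splice under the dedicated
     * spinlock so list walkers (e.g. the shrinker) need not take
     * struct_mutex.
     */
    static void move_to_gtt_list(struct drm_i915_private *i915,
    			     struct drm_i915_gem_object *obj)
    {
    	struct list_head *list;

    	spin_lock(&i915->mm.obj_lock);
    	list = obj->bind_count ? &i915->mm.bound_list :
    				 &i915->mm.unbound_list;
    	list_move_tail(&obj->mm.link, list);
    	spin_unlock(&i915->mm.obj_lock);
    }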
@@ -2196,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 
 	lockdep_assert_held(&obj->mm.lock);
-	GEM_BUG_ON(obj->mm.pages);
+	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	switch (obj->mm.madv) {
 	case I915_MADV_DONTNEED:
@@ -2253,13 +2274,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 				 enum i915_mm_subclass subclass)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
 	if (i915_gem_object_has_pinned_pages(obj))
 		return;
 
 	GEM_BUG_ON(obj->bind_count);
-	if (!READ_ONCE(obj->mm.pages))
+	if (!i915_gem_object_has_pages(obj))
 		return;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
@@ -2273,6 +2295,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	pages = fetch_and_zero(&obj->mm.pages);
 	GEM_BUG_ON(!pages);
 
+	spin_lock(&i915->mm.obj_lock);
+	list_del(&obj->mm.link);
+	spin_unlock(&i915->mm.obj_lock);
+
 	if (obj->mm.mapping) {
 		void *ptr;
 
@@ -2507,7 +2533,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	obj->mm.pages = pages;
 
 	if (i915_gem_object_is_tiled(obj) &&
-	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		GEM_BUG_ON(obj->mm.quirked);
 		__i915_gem_object_pin_pages(obj);
 		obj->mm.quirked = true;
@@ -2529,8 +2555,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		if (obj->mm.page_sizes.phys & ~0u << i)
 			obj->mm.page_sizes.sg |= BIT(i);
 	}
-
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+	spin_lock(&i915->mm.obj_lock);
+	list_add(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -2563,7 +2592,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+	if (unlikely(!i915_gem_object_has_pages(obj))) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 		err = ____i915_gem_object_get_pages(obj);
@@ -2648,7 +2677,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	type &= ~I915_MAP_OVERRIDE;
 
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+		if (unlikely(!i915_gem_object_has_pages(obj))) {
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 			ret = ____i915_gem_object_get_pages(obj);
@@ -2660,7 +2689,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
-	GEM_BUG_ON(!obj->mm.pages);
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 	if (ptr && has_type != type) {
@@ -2715,7 +2744,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	 * allows it to avoid the cost of retrieving a page (either swapin
 	 * or clearing-before-use) before it is overwritten.
 	 */
-	if (READ_ONCE(obj->mm.pages))
+	if (i915_gem_object_has_pages(obj))
 		return -ENODEV;
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
@@ -3090,7 +3119,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
-	GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
 	dma_fence_set_error(&request->fence, -EIO);
 
 	i915_gem_request_submit(request);
@@ -3100,7 +3128,6 @@ static void nop_complete_submit_request(struct drm_i915_gem_request *request)
 {
 	unsigned long flags;
 
-	GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3498,7 +3525,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
 {
-	if (!READ_ONCE(obj->pin_display))
+	if (!READ_ONCE(obj->pin_global))
 		return;
 
 	mutex_lock(&obj->base.dev->struct_mutex);
@@ -3865,10 +3892,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	/* Mark the pin_display early so that we account for the
+	/* Mark the global pin early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
-	obj->pin_display++;
+	obj->pin_global++;
 
 	/* The display engine is not coherent with the LLC cache on gen6.  As
 	 * a result, we make sure that the pinning that is about to occur is
@@ -3884,7 +3911,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 					      I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret) {
 		vma = ERR_PTR(ret);
-		goto err_unpin_display;
+		goto err_unpin_global;
 	}
 
 	/* As the user may map the buffer once pinned in the display plane
@@ -3915,7 +3942,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
 	}
 	if (IS_ERR(vma))
-		goto err_unpin_display;
+		goto err_unpin_global;
 
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
@@ -3930,8 +3957,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
 	return vma;
 
-err_unpin_display:
-	obj->pin_display--;
+err_unpin_global:
+	obj->pin_global--;
 	return vma;
 }
 
@@ -3940,10 +3967,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-	if (WARN_ON(vma->obj->pin_display == 0))
+	if (WARN_ON(vma->obj->pin_global == 0))
 		return;
 
-	if (--vma->obj->pin_display == 0)
+	if (--vma->obj->pin_global == 0)
 		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
 	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
@@ -4283,7 +4310,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4302,7 +4329,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		obj->mm.madv = args->madv;
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+	if (obj->mm.madv == I915_MADV_DONTNEED &&
+	    !i915_gem_object_has_pages(obj))
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4328,7 +4356,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
 	mutex_init(&obj->mm.lock);
 
-	INIT_LIST_HEAD(&obj->global_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4483,13 +4510,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 {
 	struct drm_i915_gem_object *obj, *on;
 
-	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
-	llist_for_each_entry(obj, freed, freed) {
+	llist_for_each_entry_safe(obj, on, freed, freed) {
 		struct i915_vma *vma, *vn;
 
 		trace_i915_gem_object_destroy(obj);
 
+		mutex_lock(&i915->drm.struct_mutex);
+
 		GEM_BUG_ON(i915_gem_object_is_active(obj));
 		list_for_each_entry_safe(vma, vn,
 					 &obj->vma_list, obj_link) {
@@ -4500,14 +4528,20 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma_list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
 
-		list_del(&obj->global_link);
-	}
-	intel_runtime_pm_put(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
+		/* This serializes freeing with the shrinker. Since the free
+		 * is delayed, first by RCU then by the workqueue, we want the
+		 * shrinker to be able to free pages of unreferenced objects,
+		 * or else we may oom whilst there are plenty of deferred
+		 * freed objects.
+		 */
+		if (i915_gem_object_has_pages(obj)) {
+			spin_lock(&i915->mm.obj_lock);
+			list_del_init(&obj->mm.link);
+			spin_unlock(&i915->mm.obj_lock);
+		}
 
-	cond_resched();
+		mutex_unlock(&i915->drm.struct_mutex);
 
-	llist_for_each_entry_safe(obj, on, freed, freed) {
 		GEM_BUG_ON(obj->bind_count);
 		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
@@ -4519,7 +4553,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 			atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		GEM_BUG_ON(obj->mm.pages);
+		GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
@@ -4530,16 +4564,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		kfree(obj->bit_17);
 		i915_gem_object_free(obj);
+
+		if (on)
+			cond_resched();
 	}
+	intel_runtime_pm_put(i915);
 }
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
 	struct llist_node *freed;
 
-	freed = llist_del_all(&i915->mm.free_list);
-	if (unlikely(freed))
+	/* Free the oldest, most stale object to keep the free_list short */
+	freed = NULL;
+	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+		/* Only one consumer of llist_del_first() allowed */
+		spin_lock(&i915->mm.free_lock);
+		freed = llist_del_first(&i915->mm.free_list);
+		spin_unlock(&i915->mm.free_lock);
+	}
+	if (unlikely(freed)) {
+		freed->next = NULL;
 		__i915_gem_free_objects(i915, freed);
+	}
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
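The free-list rework above has a subtle constraint worth spelling out: llist_add() is lock-free with any number of producers, but llist_del_first() is only safe with a single consumer, hence the new free_lock around it. Popping one node at a time keeps each pass through __i915_gem_free_objects() short, and the popped node's ->next must be cleared before it is treated as a one-element list. The idiom in isolation, names following the patch:

    /* Pop exactly one entry from a lock-free llist when several
     * threads may consume; two concurrent llist_del_first() callers
     * could otherwise race on ->next.
     */
    static struct llist_node *pop_one_freed(struct drm_i915_private *i915)
    {
    	struct llist_node *freed = NULL;

    	if (!llist_empty(&i915->mm.free_list)) {   /* unlocked fast path */
    		spin_lock(&i915->mm.free_lock);
    		freed = llist_del_first(&i915->mm.free_list);
    		spin_unlock(&i915->mm.free_lock);
    	}
    	if (freed)
    		freed->next = NULL;  /* detach: now a 1-entry list */

    	return freed;
    }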
@@ -4840,6 +4887,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	init_unused_rings(dev_priv);
 
 	BUG_ON(!dev_priv->kernel_context);
+	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		ret = -EIO;
+		goto out;
+	}
 
 	ret = i915_ppgtt_init_hw(dev_priv);
 	if (ret) {
@@ -4938,8 +4989,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		i915_gem_set_wedged(dev_priv);
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+			DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+			i915_gem_set_wedged(dev_priv);
+		}
 		ret = 0;
 	}
 
@@ -5039,11 +5092,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 		goto err_priorities;
 
 	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+	spin_lock_init(&dev_priv->mm.obj_lock);
+	spin_lock_init(&dev_priv->mm.free_lock);
 	init_llist_head(&dev_priv->mm.free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
 			  i915_gem_retire_work_handler);
 	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5137,12 +5194,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 	i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
 	i915_gem_drain_freed_objects(dev_priv);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	spin_lock(&dev_priv->mm.obj_lock);
 	for (p = phases; *p; p++) {
-		list_for_each_entry(obj, *p, global_link)
+		list_for_each_entry(obj, *p, mm.link)
 			__start_cpu_write(obj);
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return 0;
 }
@@ -5461,7 +5518,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	pages = obj->mm.pages;
+	pages = fetch_and_zero(&obj->mm.pages);
+	if (pages) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+		__i915_gem_object_reset_page_iter(obj);
+
+		spin_lock(&i915->mm.obj_lock);
+		list_del(&obj->mm.link);
+		spin_unlock(&i915->mm.obj_lock);
+	}
+
 	obj->ops = &i915_gem_phys_ops;
 
 	err = ____i915_gem_object_get_pages(obj);

+ 1 - 0
drivers/gpu/drm/i915/i915_gem_clflush.c

@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	drm_clflush_sg(obj->mm.pages);
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 }

+ 7 - 0
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -33,6 +33,10 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
+I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
+	bool fail_if_busy:1;
+} igt_evict_ctl;)
+
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
@@ -205,6 +209,9 @@ search_again:
 	 * the kernel's there is no more we can evict.
 	 */
 	if (!ggtt_is_idle(dev_priv)) {
+		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
+			return -EBUSY;
+
 		ret = ggtt_flush(dev_priv);
 		if (ret)
 			return ret;
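The igt_evict_ctl knob above costs production builds nothing: the I915_SELFTEST_* macros compile their argument away entirely unless CONFIG_DRM_I915_SELFTEST is enabled, while letting the new eviction selftest force the -EBUSY path on demand. The mechanism is roughly the following (a sketch of the conditional-compilation idiom, not a verbatim quote of i915_selftest.h):

    #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
    #define I915_SELFTEST_DECLARE(x) x		/* emitted for selftest builds */
    #define I915_SELFTEST_ONLY(x)    unlikely(x)	/* evaluated at runtime */
    #else
    #define I915_SELFTEST_DECLARE(x)
    #define I915_SELFTEST_ONLY(x)    0		/* branch folds away */
    #endif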

+ 1 - 2
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -3594,8 +3594,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
 		bool ggtt_bound = false;
 		struct i915_vma *vma;
 

+ 8 - 2
drivers/gpu/drm/i915/i915_gem_object.h

@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
-	struct list_head global_link;
 	union {
 		struct rcu_head rcu;
 		struct llist_node freed;
@@ -161,7 +160,8 @@ struct drm_i915_gem_object {
 	/** Count of VMA actually bound by this object */
 	unsigned int bind_count;
 	unsigned int active_count;
-	unsigned int pin_display;
+	/** Count of how many global VMA are currently pinned for use by HW */
+	unsigned int pin_global;
 
 	struct {
 		struct mutex lock; /* protects the pages and their use */
@@ -207,6 +207,12 @@ struct drm_i915_gem_object {
 			struct mutex lock; /* protects this cache */
 		} get_page;
 
+		/**
+		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
+		 * locked by i915->mm.obj_lock.
+		 */
+		struct list_head link;
+
 		/**
 		 * Advice: are the backing pages purgeable?
 		 */

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_render_state.c

@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Recreate the page after shrinking */
-	if (!so->vma->obj->mm.pages)
+	if (!i915_gem_object_has_pages(so->vma->obj))
 		so->batch_offset = -1;
 
 	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);

+ 57 - 62
drivers/gpu/drm/i915/i915_gem_shrinker.c

@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static bool any_vma_pinned(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		/* Only GGTT vma may be permanently pinned, and are always
-		 * at the start of the list. We can stop hunting as soon
-		 * as we see a ppGTT vma.
-		 */
-		if (!i915_vma_is_ggtt(vma))
-			break;
-
-		if (i915_vma_is_pinned(vma))
-			return true;
-	}
-
-	return false;
-}
-
 static bool swap_available(void)
 {
 	return get_nr_swap_pages() > 0;
@@ -97,9 +78,6 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	if (!obj->mm.pages)
-		return false;
-
 	/* Consider only shrinkable ojects. */
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
 		return false;
 
-	if (any_vma_pinned(obj))
+	/* If any vma are "permanently" pinned, it will prevent us from
+	 * reclaiming the obj->mm.pages. We only allow scanout objects to claim
+	 * a permanent pin, along with a few others like the context objects.
+	 * To simplify the scan, and to avoid walking the list of vma under the
+	 * object, we just check the count of its permanently pinned.
+	 */
+	if (READ_ONCE(obj->pin_global))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-	return !READ_ONCE(obj->mm.pages);
+	return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -217,15 +201,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
+
+		/*
+		 * We serialize our access to unreferenced objects through
+		 * the use of the struct_mutex. While the objects are not
+		 * yet freed (due to RCU then a workqueue) we still want
+		 * to be able to shrink their pages, so they remain on
+		 * the unbound/bound list until actually freed.
+		 */
+		spin_lock(&dev_priv->mm.obj_lock);
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
-						       global_link))) {
-			list_move_tail(&obj->global_link, &still_in_list);
-			if (!obj->mm.pages) {
-				list_del_init(&obj->global_link);
-				continue;
-			}
+						       mm.link))) {
+			list_move_tail(&obj->mm.link, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
 			    obj->mm.madv != I915_MADV_DONTNEED)
@@ -243,20 +232,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if (!can_release_pages(obj))
 				continue;
 
+			spin_unlock(&dev_priv->mm.obj_lock);
+
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
-				if (!obj->mm.pages) {
+				if (!i915_gem_object_has_pages(obj)) {
 					__i915_gem_object_invalidate(obj);
-					list_del_init(&obj->global_link);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
 				mutex_unlock(&obj->mm.lock);
-				scanned += obj->base.size >> PAGE_SHIFT;
 			}
+			scanned += obj->base.size >> PAGE_SHIFT;
+
+			spin_lock(&dev_priv->mm.obj_lock);
 		}
 		list_splice_tail(&still_in_list, phase->list);
+		spin_unlock(&dev_priv->mm.obj_lock);
 	}
 
 	if (flags & I915_SHRINK_BOUND)
@@ -302,28 +295,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 static unsigned long
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_gem_object *obj;
-	unsigned long count;
-	bool unlock;
-
-	if (!shrinker_lock(dev_priv, &unlock))
-		return 0;
-
-	i915_gem_retire_requests(dev_priv);
+	unsigned long num_objects = 0;
+	unsigned long count = 0;
 
 
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
-		if (can_release_pages(obj))
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+		if (can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
 			count += obj->base.size >> PAGE_SHIFT;
+			num_objects++;
+		}
 
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
 			count += obj->base.size >> PAGE_SHIFT;
-	}
+			num_objects++;
+		}
+	spin_unlock(&i915->mm.obj_lock);
 
 
-	shrinker_unlock(dev_priv, unlock);
+	/* Update our preferred vmscan batch size for the next pass.
+	 * Our rough guess for an effective batch size is roughly 2
+	 * available GEM objects worth of pages. That is we don't want
+	 * the shrinker to fire, until it is worth the cost of freeing an
+	 * entire GEM object.
+	 */
+	if (num_objects) {
+		unsigned long avg = 2 * count / num_objects;
+
+		i915->mm.shrinker.batch =
+			max((i915->mm.shrinker.batch + avg) >> 1,
+			    128ul /* default SHRINK_BATCH */);
+	}
 
 
 	return count;
 	return count;
 }
 }
@@ -400,10 +404,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
 	unsigned long unevictable, bound, unbound, freed_pages;
-	bool unlock;
-
-	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
-		return NOTIFY_DONE;
 
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
 	freed_pages = i915_gem_shrink_all(dev_priv);
 
 
@@ -412,26 +412,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * being pointed to by hardware.
 	 * being pointed to by hardware.
 	 */
 	 */
 	unbound = bound = unevictable = 0;
 	unbound = bound = unevictable = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-		if (!obj->mm.pages)
-			continue;
-
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		if (!can_release_pages(obj))
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 		else
 			unbound += obj->base.size >> PAGE_SHIFT;
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
 	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!obj->mm.pages)
-			continue;
-
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		if (!can_release_pages(obj))
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 		else
 			bound += obj->base.size >> PAGE_SHIFT;
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
 	}
-
-	shrinker_unlock(dev_priv, unlock);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 
 	if (freed_pages || unbound || bound)
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
 		pr_info("Purging GPU memory, %lu pages freed, "
@@ -498,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	dev_priv->mm.shrinker.batch = 4096;
 	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;

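The batch heuristic above deliberately smooths shrinker.batch rather than tracking the instantaneous average. A rough walk-through with invented numbers (assuming 4 KiB pages and the kernel's max() macro):

	unsigned long count = 1048576;		/* freeable pages reported this pass */
	unsigned long num_objects = 512;	/* objects those pages belong to */
	unsigned long batch = 4096;		/* initial value from i915_gem_shrinker_init() */

	unsigned long avg = 2 * count / num_objects;	/* = 4096 pages */
	batch = max((batch + avg) >> 1, 128ul);		/* (4096 + 4096) / 2 = 4096 */

With many small objects instead (say count = 25600 across num_objects = 400, so avg = 128), batch decays 4096 -> 2112 -> 1120 -> ... toward the 128-page SHRINK_BATCH floor over successive count() calls, rather than jumping there the moment the workload changes.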
+ 4 - 1
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -724,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return obj;
 

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	 * due to the change in swizzling.
 	 */
 	mutex_lock(&obj->mm.lock);
-	if (obj->mm.pages &&
+	if (i915_gem_object_has_pages(obj) &&
 	    obj->mm.madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (tiling == I915_TILING_NONE) {

+ 9 - 7
drivers/gpu/drm/i915/i915_gem_userptr.c

@@ -82,11 +82,11 @@ static void cancel_userptr(struct work_struct *work)
 	/* We are inside a kthread context and can't be interrupted */
 	if (i915_gem_object_unbind(obj) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-	WARN_ONCE(obj->mm.pages,
-		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+	WARN_ONCE(i915_gem_object_has_pages(obj),
+		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
 		  obj->bind_count,
 		  atomic_read(&obj->mm.pages_pin_count),
-		  obj->pin_display);
+		  obj->pin_global);
 
 	mutex_unlock(&obj->base.dev->struct_mutex);
 
@@ -221,15 +221,17 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 			/* Protected by mm_lock */
 			mm->mn = fetch_and_zero(&mn);
 		}
-	} else {
-		/* someone else raced and successfully installed the mmu
-		 * notifier, we can cancel our own errors */
+	} else if (mm->mn) {
+		/*
+		 * Someone else raced and successfully installed the mmu
+		 * notifier, we can cancel our own errors.
+		 */
 		err = 0;
 	}
 	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);
 
-	if (mn) {
+	if (mn && !IS_ERR(mn)) {
 		destroy_workqueue(mn->wq);
 		kfree(mn);
 	}

+ 1 - 3
drivers/gpu/drm/i915/i915_pci.c

@@ -193,7 +193,6 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
 static const struct intel_device_info intel_g45_info __initconst = {
 	GEN4_FEATURES,
 	.platform = INTEL_G45,
-	.has_pipe_cxsr = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
 
@@ -201,7 +200,6 @@ static const struct intel_device_info intel_gm45_info __initconst = {
 	GEN4_FEATURES,
 	.platform = INTEL_GM45,
 	.is_mobile = 1, .has_fbc = 1,
-	.has_pipe_cxsr = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
@@ -645,7 +643,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
 	i915_driver_unload(dev);
-	drm_dev_unref(dev);
+	drm_dev_put(dev);
 }
 
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

+ 1 - 0
drivers/gpu/drm/i915/i915_pvinfo.h

@@ -53,6 +53,7 @@ enum vgt_g2v_type {
  * VGT capabilities type
  */
 #define VGT_CAPS_FULL_48BIT_PPGTT	BIT(2)
+#define VGT_CAPS_HWSP_EMULATION		BIT(3)
 
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */

+ 1 - 1
drivers/gpu/drm/i915/i915_reg.h

@@ -5242,7 +5242,7 @@ enum {
 #define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
 #define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
 #define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
-#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MAX	    (3 << 26) /* Varies per platform */
 #define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
 #define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
 #define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)

+ 13 - 5
drivers/gpu/drm/i915/i915_sw_fence.c

@@ -41,6 +41,11 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 	debug_object_init(fence, &i915_sw_fence_debug_descr);
 }
 
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+}
+
 static inline void debug_fence_activate(struct i915_sw_fence *fence)
 {
 	debug_object_activate(fence, &i915_sw_fence_debug_descr);
@@ -79,6 +84,10 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 {
 }
 
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+}
+
 static inline void debug_fence_activate(struct i915_sw_fence *fence)
 {
 }
@@ -360,9 +369,9 @@ struct i915_sw_dma_fence_cb {
 	struct irq_work work;
 };
 
-static void timer_i915_sw_fence_wake(unsigned long data)
+static void timer_i915_sw_fence_wake(struct timer_list *t)
 {
-	struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
+	struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
 	struct i915_sw_fence *fence;
 
 	fence = xchg(&cb->fence, NULL);
@@ -425,9 +434,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 	i915_sw_fence_await(fence);
 
 	cb->dma = NULL;
-	__setup_timer(&cb->timer,
-		      timer_i915_sw_fence_wake, (unsigned long)cb,
-		      TIMER_IRQSAFE);
+	timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
 	init_irq_work(&cb->work, irq_i915_sw_fence_work);
 	if (timeout) {
 		cb->dma = dma_fence_get(dma);
@@ -507,5 +514,6 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/lib_sw_fence.c"
 #include "selftests/i915_sw_fence.c"
 #endif

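The timer conversions in this file (and in intel_breadcrumbs.c further down) follow the same mechanical recipe introduced for kernel 4.15: the callback receives the struct timer_list * and recovers its container with from_timer(), instead of casting an unsigned long cookie. A minimal sketch of the pattern, with a made-up container struct for illustration:

	struct example {		/* hypothetical, for illustration only */
		struct timer_list timer;
		int payload;
	};

	static void example_timer_fn(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer member */
		struct example *e = from_timer(e, t, timer);

		e->payload++;
	}

	/* at init time there is no cookie argument any more: */
	timer_setup(&e->timer, example_timer_fn, TIMER_IRQSAFE);

This is why timer_i915_sw_fence_wake() above can drop __setup_timer(): the i915_sw_dma_fence_cb pointer is recomputed from the timer itself.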
+ 6 - 0
drivers/gpu/drm/i915/i915_vgpu.h

@@ -30,6 +30,12 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv);
 
 bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
 
+static inline bool
+intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
 int intel_vgt_balloon(struct drm_i915_private *dev_priv);
 void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
 

+ 12 - 4
drivers/gpu/drm/i915/i915_vma.c

@@ -58,8 +58,10 @@ i915_vma_retire(struct i915_gem_active *active,
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
+	spin_lock(&rq->i915->mm.obj_lock);
 	if (obj->bind_count)
-		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+	spin_unlock(&rq->i915->mm.obj_lock);
 
 	obj->mm.dirty = true; /* be paranoid  */
 
@@ -563,9 +565,13 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
+
 	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
@@ -580,6 +586,7 @@ err_unpin:
 static void
 i915_vma_remove(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -593,9 +600,10 @@ i915_vma_remove(struct i915_vma *vma)
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
+	spin_lock(&i915->mm.obj_lock);
 	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
+		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be

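The i915_vma.c hunks above repeat the pattern already seen in the shrinker and stolen-memory diffs: every move of an object between mm.bound_list and mm.unbound_list, together with the matching bind_count update, now happens under the new mm.obj_lock spinlock. Condensed to its core (taken straight from the hunks, not a new API):

	spin_lock(&i915->mm.obj_lock);
	if (--obj->bind_count == 0)
		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);

Keeping bind_count inside the same critical section as the list move is what lets the shrinker walk these lists and trust which list an object sits on without holding struct_mutex.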
+ 62 - 20
drivers/gpu/drm/i915/intel_bios.c

@@ -691,6 +691,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
 }
 
+static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+				      u16 version, enum port port)
+{
+	if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
+		dev_priv->vbt.dsi.bl_ports = BIT(port);
+		if (dev_priv->vbt.dsi.config->cabc_supported)
+			dev_priv->vbt.dsi.cabc_ports = BIT(port);
+
+		return;
+	}
+
+	switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+	case DL_DCS_PORT_A:
+		dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+		break;
+	case DL_DCS_PORT_C:
+		dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+		break;
+	default:
+	case DL_DCS_PORT_A_AND_C:
+		dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+		break;
+	}
+
+	if (!dev_priv->vbt.dsi.config->cabc_supported)
+		return;
+
+	switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+	case DL_DCS_PORT_A:
+		dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+		break;
+	case DL_DCS_PORT_C:
+		dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+		break;
+	default:
+	case DL_DCS_PORT_A_AND_C:
+		dev_priv->vbt.dsi.cabc_ports =
+					BIT(PORT_A) | BIT(PORT_C);
+		break;
+	}
+}
+
 static void
 parse_mipi_config(struct drm_i915_private *dev_priv,
 		  const struct bdb_header *bdb)
@@ -699,9 +741,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 	const struct mipi_config *config;
 	const struct mipi_pps_data *pps;
 	int panel_type = dev_priv->vbt.panel_type;
+	enum port port;
 
 	/* parse MIPI blocks only if LFP type is MIPI */
-	if (!intel_bios_is_dsi_present(dev_priv, NULL))
+	if (!intel_bios_is_dsi_present(dev_priv, &port))
 		return;
 
 	/* Initialize this to undefined indicating no generic MIPI support */
@@ -742,15 +785,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	/*
-	 * These fields are introduced from the VBT version 197 onwards,
-	 * so making sure that these bits are set zero in the previous
-	 * versions.
-	 */
-	if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
-		dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
-		dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
-	}
+	parse_dsi_backlight_ports(dev_priv, bdb->version, port);
 
 	/* We have mandatory mipi config blocks. Initialize as generic panel */
 	dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
@@ -1071,6 +1106,22 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 	}
 }
 
+static const u8 cnp_ddc_pin_map[] = {
+	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+	[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+{
+	if (HAS_PCH_CNP(dev_priv) &&
+	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
+		return cnp_ddc_pin_map[vbt_pin];
+
+	return vbt_pin;
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   u8 bdb_version)
 {
@@ -1163,16 +1214,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		info->alternate_ddc_pin = ddc_pin;
-
-		/*
-		 * All VBTs that we got so far for B Stepping has this
-		 * information wrong for Port D. So, let's just ignore for now.
-		 */
-		if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
-		    port == PORT_D) {
-			info->alternate_ddc_pin = 0;
-		}
+		info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
 
 		sanitize_ddc_pin(dev_priv, port);
 	}

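To make the two new intel_bios.c helpers concrete: parse_dsi_backlight_ports() collapses both masks to the single detected port for single-link panels or any VBT older than version 197, and only consults the dl_dcs_* fields beyond that. For a hypothetical dual-link panel on a v200 VBT with cabc_supported set, dl_dcs_backlight_ports = DL_DCS_PORT_C and dl_dcs_cabc_ports = DL_DCS_PORT_A_AND_C (values invented for illustration), the result would be:

	dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
	dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A) | BIT(PORT_C);

while the same panel on a v196 VBT would get both masks set to BIT(port) for the port reported by intel_bios_is_dsi_present(). Similarly, map_ddc_pin() only rewrites pins on CNP: a VBT value of DDC_BUS_DDI_B comes back as GMBUS_PIN_1_BXT there, and is passed through unchanged on every other PCH.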
+ 8 - 10
drivers/gpu/drm/i915/intel_breadcrumbs.c

@@ -74,9 +74,10 @@ static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
 	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
 }
 
-static void intel_breadcrumbs_hangcheck(unsigned long data)
+static void intel_breadcrumbs_hangcheck(struct timer_list *t)
 {
-	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs *engine = from_timer(engine, t,
+						    breadcrumbs.hangcheck);
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
 	if (!b->irq_armed)
@@ -108,9 +109,10 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
 	}
 }
 
-static void intel_breadcrumbs_fake_irq(unsigned long data)
+static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 {
-	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+	struct intel_engine_cs *engine = from_timer(engine, t,
+						    breadcrumbs.fake_irq);
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
 	/* The timer persists in case we cannot enable interrupts,
@@ -787,12 +789,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 	spin_lock_init(&b->rb_lock);
 	spin_lock_init(&b->irq_lock);
 
-	setup_timer(&b->fake_irq,
-		    intel_breadcrumbs_fake_irq,
-		    (unsigned long)engine);
-	setup_timer(&b->hangcheck,
-		    intel_breadcrumbs_hangcheck,
-		    (unsigned long)engine);
+	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
+	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
 
 	/* Spawn a thread to provide a common bottom-half for all signals.
 	 * As this is an asynchronous interface we cannot steal the current

+ 21 - 5
drivers/gpu/drm/i915/intel_crt.c

@@ -343,11 +343,26 @@ intel_crt_mode_valid(struct drm_connector *connector,
 static bool intel_crt_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
+{
+	return true;
+}
+
+static bool pch_crt_compute_config(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
+{
+	pipe_config->has_pch_encoder = true;
+
+	return true;
+}
+
+static bool hsw_crt_compute_config(struct intel_encoder *encoder,
+				   struct intel_crtc_state *pipe_config,
+				   struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (HAS_PCH_SPLIT(dev_priv))
-		pipe_config->has_pch_encoder = true;
+	pipe_config->has_pch_encoder = true;
 
 	/* LPT FDI RX only supports 8bpc. */
 	if (HAS_PCH_LPT(dev_priv)) {
@@ -360,8 +375,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	}
 
 	/* FDI must always be 2.7 GHz */
-	if (HAS_DDI(dev_priv))
-		pipe_config->port_clock = 135000 * 2;
+	pipe_config->port_clock = 135000 * 2;
 
 	return true;
 }
@@ -959,11 +973,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	    !dmi_check_system(intel_spurious_crt_detect))
 		crt->base.hpd_pin = HPD_CRT;
 
-	crt->base.compute_config = intel_crt_compute_config;
 	if (HAS_DDI(dev_priv)) {
 		crt->base.port = PORT_E;
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
+		crt->base.compute_config = hsw_crt_compute_config;
 		crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
 		crt->base.pre_enable = hsw_pre_enable_crt;
 		crt->base.enable = hsw_enable_crt;
@@ -971,9 +985,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		crt->base.post_disable = hsw_post_disable_crt;
 	} else {
 		if (HAS_PCH_SPLIT(dev_priv)) {
+			crt->base.compute_config = pch_crt_compute_config;
 			crt->base.disable = pch_disable_crt;
 			crt->base.post_disable = pch_post_disable_crt;
 		} else {
+			crt->base.compute_config = intel_crt_compute_config;
 			crt->base.disable = intel_disable_crt;
 		}
 		crt->base.port = PORT_NONE;

+ 16 - 15
drivers/gpu/drm/i915/intel_csr.c

@@ -52,10 +52,6 @@ MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 #define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
-#define FIRMWARE_URL  "https://01.org/linuxgraphics/downloads/firmware"
-
-
-
 
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -291,7 +287,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	css_header = (struct intel_css_header *)fw->data;
 	if (sizeof(struct intel_css_header) !=
 	    (css_header->header_len * 4)) {
-		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+		DRM_ERROR("DMC firmware has wrong CSS header length "
+			  "(%u bytes)\n",
 			  (css_header->header_len * 4));
 		return NULL;
 	}
@@ -315,7 +312,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	if (csr->version != required_version) {
 		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
-			 " please use v%u.%u [" FIRMWARE_URL "].\n",
+			 " please use v%u.%u\n",
 			 CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version),
 			 CSR_VERSION_MAJOR(required_version),
@@ -330,7 +327,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 		&fw->data[readcount];
 	if (sizeof(struct intel_package_header) !=
 	    (package_header->header_len * 4)) {
-		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+		DRM_ERROR("DMC firmware has wrong package header length "
+			  "(%u bytes)\n",
 			  (package_header->header_len * 4));
 		return NULL;
 	}
@@ -351,7 +349,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 			dmc_offset = package_header->fw_info[i].offset;
 	}
 	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
-		DRM_ERROR("Firmware not supported for %c stepping\n",
+		DRM_ERROR("DMC firmware not supported for %c stepping\n",
 			  si->stepping);
 		return NULL;
 	}
@@ -360,7 +358,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	/* Extract dmc_header information. */
 	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
 	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
-		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+		DRM_ERROR("DMC firmware has wrong dmc header length "
+			  "(%u bytes)\n",
 			  (dmc_header->header_len));
 		return NULL;
 	}
@@ -368,7 +367,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	/* Cache the dmc header info. */
 	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
-		DRM_ERROR("Firmware has wrong mmio count %u\n",
+		DRM_ERROR("DMC firmware has wrong mmio count %u\n",
 			  dmc_header->mmio_count);
 		return NULL;
 	}
@@ -376,7 +375,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	for (i = 0; i < dmc_header->mmio_count; i++) {
 		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
 		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
-			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
+			DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
				  dmc_header->mmioaddr[i]);
 			return NULL;
 		}
@@ -387,7 +386,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
 	nbytes = dmc_header->fw_size * 4;
 	if (nbytes > CSR_MAX_FW_SIZE) {
-		DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+		DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
 		return NULL;
 	}
 	csr->dmc_fw_size = dmc_header->fw_size;
@@ -425,9 +424,11 @@ static void csr_load_work_fn(struct work_struct *work)
 			 CSR_VERSION_MINOR(csr->version));
 	} else {
 		dev_notice(dev_priv->drm.dev,
-			   "Failed to load DMC firmware"
-			   " [" FIRMWARE_URL "],"
-			   " disabling runtime power management.\n");
+			   "Failed to load DMC firmware %s."
+			   " Disabling runtime power management.\n",
+			   csr->fw_path);
+		dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+			   INTEL_UC_FIRMWARE_URL);
 	}
 
 	release_firmware(fw);

+ 464 - 405
drivers/gpu/drm/i915/intel_ddi.c

@@ -305,35 +305,34 @@ struct bxt_ddi_buf_trans {
 	u8 scale;	/* scale value */
 	u8 scale;	/* scale value */
 	u8 enable;	/* scale enable */
 	u8 enable;	/* scale enable */
 	u8 deemphasis;
 	u8 deemphasis;
-	bool default_index; /* true if the entry represents default value */
 };
 };
 
 
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
 					/* Idx	NT mV diff	db  */
 					/* Idx	NT mV diff	db  */
-	{ 52,  0x9A, 0, 128, true  },	/* 0:	400		0   */
-	{ 78,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
-	{ 104, 0x9A, 0, 64,  false },	/* 2:	400		6   */
-	{ 154, 0x9A, 0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
-	{ 116, 0x9A, 0, 85,  false },	/* 5:	600		3.5 */
-	{ 154, 0x9A, 0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
-	{ 154, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
-	{ 154, 0x9A, 1, 128, false },	/* 9:	1200		0   */
+	{ 52,  0x9A, 0, 128, },	/* 0:	400		0   */
+	{ 78,  0x9A, 0, 85,  },	/* 1:	400		3.5 */
+	{ 104, 0x9A, 0, 64,  },	/* 2:	400		6   */
+	{ 154, 0x9A, 0, 43,  },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, },	/* 4:	600		0   */
+	{ 116, 0x9A, 0, 85,  },	/* 5:	600		3.5 */
+	{ 154, 0x9A, 0, 64,  },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, },	/* 7:	800		0   */
+	{ 154, 0x9A, 0, 85,  },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, },	/* 9:	1200		0   */
 };
 };
 
 
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
 					/* Idx	NT mV diff	db  */
 					/* Idx	NT mV diff	db  */
-	{ 26, 0, 0, 128, false },	/* 0:	200		0   */
-	{ 38, 0, 0, 112, false },	/* 1:	200		1.5 */
-	{ 48, 0, 0, 96,  false },	/* 2:	200		4   */
-	{ 54, 0, 0, 69,  false },	/* 3:	200		6   */
-	{ 32, 0, 0, 128, false },	/* 4:	250		0   */
-	{ 48, 0, 0, 104, false },	/* 5:	250		1.5 */
-	{ 54, 0, 0, 85,  false },	/* 6:	250		4   */
-	{ 43, 0, 0, 128, false },	/* 7:	300		0   */
-	{ 54, 0, 0, 101, false },	/* 8:	300		1.5 */
-	{ 48, 0, 0, 128, false },	/* 9:	300		0   */
+	{ 26, 0, 0, 128, },	/* 0:	200		0   */
+	{ 38, 0, 0, 112, },	/* 1:	200		1.5 */
+	{ 48, 0, 0, 96,  },	/* 2:	200		4   */
+	{ 54, 0, 0, 69,  },	/* 3:	200		6   */
+	{ 32, 0, 0, 128, },	/* 4:	250		0   */
+	{ 48, 0, 0, 104, },	/* 5:	250		1.5 */
+	{ 54, 0, 0, 85,  },	/* 6:	250		4   */
+	{ 43, 0, 0, 128, },	/* 7:	300		0   */
+	{ 54, 0, 0, 101, },	/* 8:	300		1.5 */
+	{ 48, 0, 0, 128, },	/* 9:	300		0   */
 };
 };
 
 
 /* BSpec has 2 recommended values - entries 0 and 8.
 /* BSpec has 2 recommended values - entries 0 and 8.
@@ -341,16 +340,16 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
  */
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 					/* Idx	NT mV diff	db  */
 					/* Idx	NT mV diff	db  */
-	{ 52,  0x9A, 0, 128, false },	/* 0:	400		0   */
-	{ 52,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
-	{ 52,  0x9A, 0, 64,  false },	/* 2:	400		6   */
-	{ 42,  0x9A, 0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
-	{ 77,  0x9A, 0, 85,  false },	/* 5:	600		3.5 */
-	{ 77,  0x9A, 0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
-	{ 102, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
-	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
+	{ 52,  0x9A, 0, 128, },	/* 0:	400		0   */
+	{ 52,  0x9A, 0, 85,  },	/* 1:	400		3.5 */
+	{ 52,  0x9A, 0, 64,  },	/* 2:	400		6   */
+	{ 42,  0x9A, 0, 43,  },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, },	/* 4:	600		0   */
+	{ 77,  0x9A, 0, 85,  },	/* 5:	600		3.5 */
+	{ 77,  0x9A, 0, 64,  },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, },	/* 7:	800		0   */
+	{ 102, 0x9A, 0, 85,  },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, },	/* 9:	1200		0   */
 };
 };
 
 
 struct cnl_ddi_buf_trans {
 struct cnl_ddi_buf_trans {
@@ -588,6 +587,120 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 	}
 }
 }
 
 
+static int skl_buf_trans_num_entries(enum port port, int n_entries)
+{
+	/* Only DDIA and DDIE can select the 10th register with DP */
+	if (port == PORT_A || port == PORT_E)
+		return min(n_entries, 10);
+	else
+		return min(n_entries, 9);
+}
+
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
+			   enum port port, int *n_entries)
+{
+	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+		const struct ddi_buf_trans *ddi_translations =
+			kbl_get_buf_trans_dp(dev_priv, n_entries);
+		*n_entries = skl_buf_trans_num_entries(port, *n_entries);
+		return ddi_translations;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		const struct ddi_buf_trans *ddi_translations =
+			skl_get_buf_trans_dp(dev_priv, n_entries);
+		*n_entries = skl_buf_trans_num_entries(port, *n_entries);
+		return ddi_translations;
+	} else if (IS_BROADWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		return  bdw_ddi_translations_dp;
+	} else if (IS_HASWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+		return hsw_ddi_translations_dp;
+	}
+
+	*n_entries = 0;
+	return NULL;
+}
+
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
+			    enum port port, int *n_entries)
+{
+	if (IS_GEN9_BC(dev_priv)) {
+		const struct ddi_buf_trans *ddi_translations =
+			skl_get_buf_trans_edp(dev_priv, n_entries);
+		*n_entries = skl_buf_trans_num_entries(port, *n_entries);
+		return ddi_translations;
+	} else if (IS_BROADWELL(dev_priv)) {
+		return bdw_get_buf_trans_edp(dev_priv, n_entries);
+	} else if (IS_HASWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+		return hsw_ddi_translations_dp;
+	}
+
+	*n_entries = 0;
+	return NULL;
+}
+
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+			    int *n_entries)
+{
+	if (IS_BROADWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+		return bdw_ddi_translations_fdi;
+	} else if (IS_HASWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+		return hsw_ddi_translations_fdi;
+	}
+
+	*n_entries = 0;
+	return NULL;
+}
+
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+			     int *n_entries)
+{
+	if (IS_GEN9_BC(dev_priv)) {
+		return skl_get_buf_trans_hdmi(dev_priv, n_entries);
+	} else if (IS_BROADWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+		return bdw_ddi_translations_hdmi;
+	} else if (IS_HASWELL(dev_priv)) {
+		*n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+		return hsw_ddi_translations_hdmi;
+	}
+
+	*n_entries = 0;
+	return NULL;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+	*n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+	return bxt_ddi_translations_dp;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+	if (dev_priv->vbt.edp.low_vswing) {
+		*n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
+		return bxt_ddi_translations_edp;
+	}
+
+	return bxt_get_buf_trans_dp(dev_priv, n_entries);
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+	*n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+	return bxt_ddi_translations_hdmi;
+}
+
 static const struct cnl_ddi_buf_trans *
 static const struct cnl_ddi_buf_trans *
 cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
 {
@@ -657,92 +770,40 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 {
 {
-	int n_hdmi_entries;
-	int hdmi_level;
-	int hdmi_default_entry;
-
-	hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+	int n_entries, level, default_entry;
 
 
-	if (IS_GEN9_LP(dev_priv))
-		return hdmi_level;
+	level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
 
 	if (IS_CANNONLAKE(dev_priv)) {
 	if (IS_CANNONLAKE(dev_priv)) {
-		cnl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-		hdmi_default_entry = n_hdmi_entries - 1;
+		cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+		default_entry = n_entries - 1;
+	} else if (IS_GEN9_LP(dev_priv)) {
+		bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+		default_entry = n_entries - 1;
 	} else if (IS_GEN9_BC(dev_priv)) {
 	} else if (IS_GEN9_BC(dev_priv)) {
-		skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-		hdmi_default_entry = 8;
+		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+		default_entry = 8;
 	} else if (IS_BROADWELL(dev_priv)) {
 	} else if (IS_BROADWELL(dev_priv)) {
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
+		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+		default_entry = 7;
 	} else if (IS_HASWELL(dev_priv)) {
 	} else if (IS_HASWELL(dev_priv)) {
-		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-		hdmi_default_entry = 6;
+		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+		default_entry = 6;
 	} else {
 	} else {
 		WARN(1, "ddi translation table missing\n");
 		WARN(1, "ddi translation table missing\n");
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
+		return 0;
 	}
 	}
 
 
 	/* Choose a good default if VBT is badly populated */
 	/* Choose a good default if VBT is badly populated */
-	if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
-	    hdmi_level >= n_hdmi_entries)
-		hdmi_level = hdmi_default_entry;
-
-	return hdmi_level;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
-			   int *n_entries)
-{
-	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-		return kbl_get_buf_trans_dp(dev_priv, n_entries);
-	} else if (IS_SKYLAKE(dev_priv)) {
-		return skl_get_buf_trans_dp(dev_priv, n_entries);
-	} else if (IS_BROADWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-		return  bdw_ddi_translations_dp;
-	} else if (IS_HASWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
-		return hsw_ddi_translations_dp;
-	}
-
-	*n_entries = 0;
-	return NULL;
-}
-
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
-			    int *n_entries)
-{
-	if (IS_GEN9_BC(dev_priv)) {
-		return skl_get_buf_trans_edp(dev_priv, n_entries);
-	} else if (IS_BROADWELL(dev_priv)) {
-		return bdw_get_buf_trans_edp(dev_priv, n_entries);
-	} else if (IS_HASWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
-		return hsw_ddi_translations_dp;
-	}
-
-	*n_entries = 0;
-	return NULL;
-}
+	if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
+		level = default_entry;
 
 
-static const struct ddi_buf_trans *
-intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
-			    int *n_entries)
-{
-	if (IS_BROADWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
-		return bdw_ddi_translations_fdi;
-	} else if (IS_HASWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-		return hsw_ddi_translations_fdi;
-	}
+	if (WARN_ON_ONCE(n_entries == 0))
+		return 0;
+	if (WARN_ON_ONCE(level >= n_entries))
+		level = n_entries - 1;
 
 
-	*n_entries = 0;
-	return NULL;
+	return level;
 }
 }
 
 
 /*
 /*
@@ -760,11 +821,11 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 
 
 	switch (encoder->type) {
 	switch (encoder->type) {
 	case INTEL_OUTPUT_EDP:
 	case INTEL_OUTPUT_EDP:
-		ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv,
+		ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
 							       &n_entries);
 							       &n_entries);
 		break;
 		break;
 	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_DP:
-		ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv,
+		ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
 							      &n_entries);
 							      &n_entries);
 		break;
 		break;
 	case INTEL_OUTPUT_ANALOG:
 	case INTEL_OUTPUT_ANALOG:
@@ -776,16 +837,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
 		return;
 		return;
 	}
 	}
 
 
-	if (IS_GEN9_BC(dev_priv)) {
-		/* If we're boosting the current, set bit 31 of trans1 */
-		if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
-			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
-		if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
-			    port != PORT_A && port != PORT_E &&
-			    n_entries > 9))
-			n_entries = 9;
-	}
+	/* If we're boosting the current, set bit 31 of trans1 */
+	if (IS_GEN9_BC(dev_priv) &&
+	    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+		iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
 
 
 	for (i = 0; i < n_entries; i++) {
 	for (i = 0; i < n_entries; i++) {
 		I915_WRITE(DDI_BUF_TRANS_LO(port, i),
 		I915_WRITE(DDI_BUF_TRANS_LO(port, i),
@@ -800,39 +855,32 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
  * values in advance. This function programs the correct values for
  * values in advance. This function programs the correct values for
  * HDMI/DVI use cases.
  * HDMI/DVI use cases.
  */
  */
-static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
+					   int level)
 {
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 iboost_bit = 0;
 	u32 iboost_bit = 0;
-	int n_hdmi_entries, hdmi_level;
+	int n_entries;
 	enum port port = intel_ddi_get_encoder_port(encoder);
 	enum port port = intel_ddi_get_encoder_port(encoder);
-	const struct ddi_buf_trans *ddi_translations_hdmi;
+	const struct ddi_buf_trans *ddi_translations;
 
 
-	hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
+	ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
 
 
-	if (IS_GEN9_BC(dev_priv)) {
-		ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+	if (WARN_ON_ONCE(!ddi_translations))
+		return;
+	if (WARN_ON_ONCE(level >= n_entries))
+		level = n_entries - 1;
 
 
-		/* If we're boosting the current, set bit 31 of trans1 */
-		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
-			iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-	} else if (IS_BROADWELL(dev_priv)) {
-		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-	} else if (IS_HASWELL(dev_priv)) {
-		ddi_translations_hdmi = hsw_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-	} else {
-		WARN(1, "ddi translation table missing\n");
-		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-	}
+	/* If we're boosting the current, set bit 31 of trans1 */
+	if (IS_GEN9_BC(dev_priv) &&
+	    dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+		iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
 
 
 	/* Entry 9 is for HDMI: */
 	/* Entry 9 is for HDMI: */
 	I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
 	I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
-		   ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
+		   ddi_translations[level].trans1 | iboost_bit);
 	I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
 	I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
-		   ddi_translations_hdmi[hdmi_level].trans2);
+		   ddi_translations[level].trans2);
 }
 }
 
 
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -1108,14 +1156,14 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 }
 }
 
 
 static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			       uint32_t dpll)
+			       enum intel_dpll_id pll_id)
 {
 {
 	i915_reg_t cfgcr1_reg, cfgcr2_reg;
 	i915_reg_t cfgcr1_reg, cfgcr2_reg;
 	uint32_t cfgcr1_val, cfgcr2_val;
 	uint32_t cfgcr1_val, cfgcr2_val;
 	uint32_t p0, p1, p2, dco_freq;
 	uint32_t p0, p1, p2, dco_freq;
 
 
-	cfgcr1_reg = DPLL_CFGCR1(dpll);
-	cfgcr2_reg = DPLL_CFGCR2(dpll);
+	cfgcr1_reg = DPLL_CFGCR1(pll_id);
+	cfgcr2_reg = DPLL_CFGCR2(pll_id);
 
 
 	cfgcr1_val = I915_READ(cfgcr1_reg);
 	cfgcr1_val = I915_READ(cfgcr1_reg);
 	cfgcr2_val = I915_READ(cfgcr2_reg);
 	cfgcr2_val = I915_READ(cfgcr2_reg);
@@ -1168,7 +1216,7 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 }
 }
 
 
 static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			       uint32_t pll_id)
+			       enum intel_dpll_id pll_id)
 {
 {
 	uint32_t cfgcr0, cfgcr1;
 	uint32_t cfgcr0, cfgcr1;
 	uint32_t p0, p1, p2, dco_freq, ref_clock;
 	uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1255,7 +1303,8 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
 {
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int link_clock = 0;
 	int link_clock = 0;
-	uint32_t cfgcr0, pll_id;
+	uint32_t cfgcr0;
+	enum intel_dpll_id pll_id;
 
 
 	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 
 
@@ -1308,17 +1357,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 {
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int link_clock = 0;
 	int link_clock = 0;
-	uint32_t dpll_ctl1, dpll;
+	uint32_t dpll_ctl1;
+	enum intel_dpll_id pll_id;
 
 
-	dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+	pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
 
 
 	dpll_ctl1 = I915_READ(DPLL_CTRL1);
 	dpll_ctl1 = I915_READ(DPLL_CTRL1);
 
 
-	if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
-		link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+	if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
+		link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
 	} else {
 	} else {
-		link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
-		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
+		link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
+		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
 
 
 		switch (link_clock) {
 		switch (link_clock) {
 		case DPLL_CTRL1_LINK_RATE_810:
 		case DPLL_CTRL1_LINK_RATE_810:
@@ -1399,17 +1449,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 }
 }
 
 
 static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
-				enum intel_dpll_id dpll)
+			     enum intel_dpll_id pll_id)
 {
 {
 	struct intel_shared_dpll *pll;
 	struct intel_shared_dpll *pll;
 	struct intel_dpll_hw_state *state;
 	struct intel_dpll_hw_state *state;
 	struct dpll clock;
 	struct dpll clock;
 
 
 	/* For DDI ports we always use a shared PLL. */
 	/* For DDI ports we always use a shared PLL. */
-	if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+	if (WARN_ON(pll_id == DPLL_ID_PRIVATE))
 		return 0;
 		return 0;
 
 
-	pll = &dev_priv->shared_dplls[dpll];
+	pll = &dev_priv->shared_dplls[pll_id];
 	state = &pll->state.hw_state;
 	state = &pll->state.hw_state;
 
 
 	clock.m1 = 2;
 	clock.m1 = 2;
@@ -1428,9 +1478,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
 {
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_ddi_get_encoder_port(encoder);
 	enum port port = intel_ddi_get_encoder_port(encoder);
-	uint32_t dpll = port;
+	enum intel_dpll_id pll_id = port;
 
 
-	pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
+	pipe_config->port_clock = bxt_calc_pll_link(dev_priv, pll_id);
 
 
 	ddi_dotclock_get(pipe_config);
 	ddi_dotclock_get(pipe_config);
 }
 }
@@ -1782,54 +1832,36 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 	I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
 	I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
 }
 }
 
 
-static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+static void skl_ddi_set_iboost(struct intel_encoder *encoder,
+			       int level, enum intel_output_type type)
 {
 {
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	enum port port = intel_dig_port->port;
 	enum port port = intel_dig_port->port;
-	int type = encoder->type;
-	const struct ddi_buf_trans *ddi_translations;
 	uint8_t iboost;
 	uint8_t iboost;
-	uint8_t dp_iboost, hdmi_iboost;
-	int n_entries;
 
 
-	/* VBT may override standard boost values */
-	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
-	hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+	if (type == INTEL_OUTPUT_HDMI)
+		iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+	else
+		iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
 
 
-	if (type == INTEL_OUTPUT_DP) {
-		if (dp_iboost) {
-			iboost = dp_iboost;
-		} else {
-			if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
-				ddi_translations = kbl_get_buf_trans_dp(dev_priv,
-									&n_entries);
-			else
-				ddi_translations = skl_get_buf_trans_dp(dev_priv,
-									&n_entries);
-			iboost = ddi_translations[level].i_boost;
-		}
-	} else if (type == INTEL_OUTPUT_EDP) {
-		if (dp_iboost) {
-			iboost = dp_iboost;
-		} else {
-			ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+	if (iboost == 0) {
+		const struct ddi_buf_trans *ddi_translations;
+		int n_entries;
 
 
-			if (WARN_ON(port != PORT_A &&
-				    port != PORT_E && n_entries > 9))
-				n_entries = 9;
+		if (type == INTEL_OUTPUT_HDMI)
+			ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+		else if (type == INTEL_OUTPUT_EDP)
+			ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+		else
+			ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
 
 
-			iboost = ddi_translations[level].i_boost;
-		}
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		if (hdmi_iboost) {
-			iboost = hdmi_iboost;
-		} else {
-			ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
-			iboost = ddi_translations[level].i_boost;
-		}
-	} else {
-		return;
+		if (WARN_ON_ONCE(!ddi_translations))
+			return;
+		if (WARN_ON_ONCE(level >= n_entries))
+			level = n_entries - 1;
+
+		iboost = ddi_translations[level].i_boost;
 	}
 	}
 
 
 	/* Make sure that the requested I_boost is valid */
 	/* Make sure that the requested I_boost is valid */
@@ -1844,38 +1876,25 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
 		_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }

-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
-				    u32 level, enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
+				    int level, enum intel_output_type type)
 {
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	const struct bxt_ddi_buf_trans *ddi_translations;
-	u32 n_entries, i;
-
-	if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
-		n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
-		ddi_translations = bxt_ddi_translations_edp;
-	} else if (type == INTEL_OUTPUT_DP
-			|| type == INTEL_OUTPUT_EDP) {
-		n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
-		ddi_translations = bxt_ddi_translations_dp;
-	} else if (type == INTEL_OUTPUT_HDMI) {
-		n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
-		ddi_translations = bxt_ddi_translations_hdmi;
-	} else {
-		DRM_DEBUG_KMS("Vswing programming not done for encoder %d\n",
-				type);
-		return;
-	}
+	enum port port = encoder->port;
+	int n_entries;

-	/* Check if default value has to be used */
-	if (level >= n_entries ||
-	    (type == INTEL_OUTPUT_HDMI && level == HDMI_LEVEL_SHIFT_UNKNOWN)) {
-		for (i = 0; i < n_entries; i++) {
-			if (ddi_translations[i].default_index) {
-				level = i;
-				break;
-			}
-		}
-	}
+	if (type == INTEL_OUTPUT_HDMI)
+		ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+	else if (type == INTEL_OUTPUT_EDP)
+		ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+	else
+		ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+
+	if (WARN_ON_ONCE(!ddi_translations))
+		return;
+	if (WARN_ON_ONCE(level >= n_entries))
+		level = n_entries - 1;

 	bxt_ddi_phy_set_signal_level(dev_priv, port,
 				     ddi_translations[level].margin,
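The SKL iboost path above and the BXT/CNL vswing paths now share one guard around the buf trans lookup: bail out if no table exists for the platform/output combination, and clamp an out-of-range level to the last entry instead of indexing past the table. A minimal sketch of that shared idea (hypothetical helper, not part of the patch):

	static int clamp_buf_trans_level(const void *ddi_translations,
					 int n_entries, int level)
	{
		/* no table for this platform/output combination */
		if (WARN_ON_ONCE(!ddi_translations))
			return -EINVAL;
		/* never index past the end of the table */
		if (WARN_ON_ONCE(level >= n_entries))
			level = n_entries - 1;
		return level;
	}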
@@ -1887,6 +1906,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
 	int n_entries;

 	if (IS_CANNONLAKE(dev_priv)) {
@@ -1894,11 +1914,16 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 			cnl_get_buf_trans_edp(dev_priv, &n_entries);
 		else
 			cnl_get_buf_trans_dp(dev_priv, &n_entries);
+	} else if (IS_GEN9_LP(dev_priv)) {
+		if (encoder->type == INTEL_OUTPUT_EDP)
+			bxt_get_buf_trans_edp(dev_priv, &n_entries);
+		else
+			bxt_get_buf_trans_dp(dev_priv, &n_entries);
 	} else {
 		if (encoder->type == INTEL_OUTPUT_EDP)
-			intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
+			intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
 		else
-			intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
+			intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
 	}

 	if (WARN_ON(n_entries < 1))
@@ -1910,28 +1935,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 		DP_TRAIN_VOLTAGE_SWING_MASK;
 }

-static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
-				    u32 level, enum port port, int type)
+static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
+				   int level, enum intel_output_type type)
 {
-	const struct cnl_ddi_buf_trans *ddi_translations = NULL;
-	u32 n_entries, val;
-	int ln;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	const struct cnl_ddi_buf_trans *ddi_translations;
+	int n_entries, ln;
+	u32 val;

-	if (type == INTEL_OUTPUT_HDMI) {
+	if (type == INTEL_OUTPUT_HDMI)
 		ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
-	} else if (type == INTEL_OUTPUT_DP) {
-		ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
-	} else if (type == INTEL_OUTPUT_EDP) {
+	else if (type == INTEL_OUTPUT_EDP)
 		ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
-	}
+	else
+		ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);

-	if (WARN_ON(ddi_translations == NULL))
+	if (WARN_ON_ONCE(!ddi_translations))
 		return;
-
-	if (level >= n_entries) {
-		DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+	if (WARN_ON_ONCE(level >= n_entries))
 		level = n_entries - 1;
-	}

 	/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
 	val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
@@ -1976,26 +1999,22 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
 	I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
 }

-static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
+static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
+				    int level, enum intel_output_type type)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = intel_ddi_get_encoder_port(encoder);
-	int type = encoder->type;
-	int width = 0;
-	int rate = 0;
+	int width, rate, ln;
 	u32 val;
-	int ln = 0;

-	if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
-		width = intel_dp->lane_count;
-		rate = intel_dp->link_rate;
-	} else if (type == INTEL_OUTPUT_HDMI) {
+	if (type == INTEL_OUTPUT_HDMI) {
 		width = 4;
-		/* Rate is always < than 6GHz for HDMI */
+		rate = 0; /* Rate is always < than 6GHz for HDMI */
 	} else {
-		MISSING_CASE(type);
-		return;
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+		width = intel_dp->lane_count;
+		rate = intel_dp->link_rate;
 	}

 	/*
@@ -2004,7 +2023,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
 	 * else clear to 0b.
 	 */
 	val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
-	if (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)
+	if (type != INTEL_OUTPUT_HDMI)
 		val |= COMMON_KEEPER_EN;
 	else
 		val &= ~COMMON_KEEPER_EN;
@@ -2039,7 +2058,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
 	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);

 	/* 5. Program swing and de-emphasis */
-	cnl_ddi_vswing_program(dev_priv, level, port, type);
+	cnl_ddi_vswing_program(encoder, level, type);

 	/* 6. Set training enable to trigger update */
 	val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
@@ -2076,13 +2095,12 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
 	struct intel_encoder *encoder = &dport->base;
-	enum port port = dport->port;
-	u32 level = intel_ddi_dp_level(intel_dp);
+	int level = intel_ddi_dp_level(intel_dp);

 	if (IS_CANNONLAKE(dev_priv))
-		cnl_ddi_vswing_sequence(encoder, level);
+		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
 	else
-		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
+		bxt_ddi_vswing_sequence(encoder, level, encoder->type);

 	return 0;
 }
@@ -2092,10 +2110,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
 	struct intel_encoder *encoder = &dport->base;
-	uint32_t level = intel_ddi_dp_level(intel_dp);
+	int level = intel_ddi_dp_level(intel_dp);

 	if (IS_GEN9_BC(dev_priv))
-	    skl_ddi_set_iboost(encoder, level);
+		skl_ddi_set_iboost(encoder, level, encoder->type);

 	return DDI_BUF_TRANS_SELECT(level);
 }
@@ -2140,37 +2158,52 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 	}
 }

+static void intel_ddi_clk_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = intel_ddi_get_encoder_port(encoder);
+
+	if (IS_CANNONLAKE(dev_priv))
+		I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
+			   DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+	else if (IS_GEN9_BC(dev_priv))
+		I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
+			   DPLL_CTRL2_DDI_CLK_OFF(port));
+	else if (INTEL_GEN(dev_priv) < 9)
+		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
-				    int link_rate, uint32_t lane_count,
-				    struct intel_shared_dpll *pll,
-				    bool link_mst)
+				    const struct intel_crtc_state *crtc_state,
+				    const struct drm_connector_state *conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = intel_ddi_get_encoder_port(encoder);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
-	uint32_t level = intel_ddi_dp_level(intel_dp);
+	bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+	int level = intel_ddi_dp_level(intel_dp);

-	WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
+	WARN_ON(is_mst && (port == PORT_A || port == PORT_E));

-	intel_dp_set_link_params(intel_dp, link_rate, lane_count,
-				 link_mst);
-	if (encoder->type == INTEL_OUTPUT_EDP)
-		intel_edp_panel_on(intel_dp);
+	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+				 crtc_state->lane_count, is_mst);

-	intel_ddi_clk_select(encoder, pll);
+	intel_edp_panel_on(intel_dp);
+
+	intel_ddi_clk_select(encoder, crtc_state->shared_dpll);

 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);

 	if (IS_CANNONLAKE(dev_priv))
-		cnl_ddi_vswing_sequence(encoder, level);
+		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
 	else if (IS_GEN9_LP(dev_priv))
-		bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
+		bxt_ddi_vswing_sequence(encoder, level, encoder->type);
 	else
 		intel_prepare_dp_ddi_buffers(encoder);

 	intel_ddi_init_dp_buf_reg(encoder);
-	if (!link_mst)
+	if (!is_mst)
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
@@ -2178,10 +2211,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 }

 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
-				      bool has_infoframe,
 				      const struct intel_crtc_state *crtc_state,
-				      const struct drm_connector_state *conn_state,
-				      const struct intel_shared_dpll *pll)
+				      const struct drm_connector_state *conn_state)
 {
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -2191,84 +2222,49 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);

 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
-	intel_ddi_clk_select(encoder, pll);
+	intel_ddi_clk_select(encoder, crtc_state->shared_dpll);

 	intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);

 	if (IS_CANNONLAKE(dev_priv))
-		cnl_ddi_vswing_sequence(encoder, level);
+		cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
 	else if (IS_GEN9_LP(dev_priv))
-		bxt_ddi_vswing_sequence(dev_priv, level, port,
-					INTEL_OUTPUT_HDMI);
+		bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
 	else
-		intel_prepare_hdmi_ddi_buffers(encoder);
+		intel_prepare_hdmi_ddi_buffers(encoder, level);

 	if (IS_GEN9_BC(dev_priv))
-		skl_ddi_set_iboost(encoder, level);
+		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);

 	intel_dig_port->set_infoframes(&encoder->base,
-				       has_infoframe,
+				       crtc_state->has_infoframe,
 				       crtc_state, conn_state);
 }

 static void intel_ddi_pre_enable(struct intel_encoder *encoder,
-				 const struct intel_crtc_state *pipe_config,
+				 const struct intel_crtc_state *crtc_state,
 				 const struct drm_connector_state *conn_state)
 {
-	struct drm_crtc *crtc = pipe_config->base.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int type = encoder->type;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;

-	WARN_ON(intel_crtc->config->has_pch_encoder);
+	WARN_ON(crtc_state->has_pch_encoder);

 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

-	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-		intel_ddi_pre_enable_dp(encoder,
-					pipe_config->port_clock,
-					pipe_config->lane_count,
-					pipe_config->shared_dpll,
-					intel_crtc_has_type(pipe_config,
-							    INTEL_OUTPUT_DP_MST));
-	}
-	if (type == INTEL_OUTPUT_HDMI) {
-		intel_ddi_pre_enable_hdmi(encoder,
-					  pipe_config->has_infoframe,
-					  pipe_config, conn_state,
-					  pipe_config->shared_dpll);
-	}
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+	else
+		intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
 }

-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
-				   const struct intel_crtc_state *old_crtc_state,
-				   const struct drm_connector_state *old_conn_state)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
-	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
-	int type = intel_encoder->type;
-	uint32_t val;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = intel_ddi_get_encoder_port(encoder);
 	bool wait = false;
-
-	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-		/*
-		 * old_crtc_state and old_conn_state are NULL when called from
-		 * DP_MST. The main connector associated with this port is never
-		 * bound to a crtc for MST.
-		 */
-		bool is_mst = !old_crtc_state;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		/*
-		 * Power down sink before disabling the port, otherwise we end
-		 * up getting interrupts from the sink on detecting link loss.
-		 */
-		if (!is_mst)
-			intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
-	}
+	u32 val;

 	val = I915_READ(DDI_BUF_CTL(port));
 	if (val & DDI_BUF_CTL_ENABLE) {
@@ -2284,36 +2280,75 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,

 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
+}

-	if (type == INTEL_OUTPUT_HDMI) {
-		dig_port->set_infoframes(encoder, false,
-					 old_crtc_state, old_conn_state);
-	}
+static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+				      const struct intel_crtc_state *old_crtc_state,
+				      const struct drm_connector_state *old_conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_dp *intel_dp = &dig_port->dp;
+	/*
+	 * old_crtc_state and old_conn_state are NULL when called from
+	 * DP_MST. The main connector associated with this port is never
+	 * bound to a crtc for MST.
+	 */
+	bool is_mst = !old_crtc_state;

-	if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	/*
+	 * Power down sink before disabling the port, otherwise we end
+	 * up getting interrupts from the sink on detecting link loss.
+	 */
+	if (!is_mst)
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);

-		intel_edp_panel_vdd_on(intel_dp);
-		intel_edp_panel_off(intel_dp);
-	}
+	intel_disable_ddi_buf(encoder);

-	if (dig_port)
-		intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+	intel_edp_panel_vdd_on(intel_dp);
+	intel_edp_panel_off(intel_dp);

-	if (IS_CANNONLAKE(dev_priv))
-		I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
-			   DPCLKA_CFGCR0_DDI_CLK_OFF(port));
-	else if (IS_GEN9_BC(dev_priv))
-		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
-					DPLL_CTRL2_DDI_CLK_OFF(port)));
-	else if (INTEL_GEN(dev_priv) < 9)
-		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+	intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);

-	if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	intel_ddi_clk_disable(encoder);
+}

-		intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
-	}
+static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+					const struct intel_crtc_state *old_crtc_state,
+					const struct drm_connector_state *old_conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+
+	intel_disable_ddi_buf(encoder);
+
+	dig_port->set_infoframes(&encoder->base, false,
+				 old_crtc_state, old_conn_state);
+
+	intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+
+	intel_ddi_clk_disable(encoder);
+
+	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *encoder,
+				   const struct intel_crtc_state *old_crtc_state,
+				   const struct drm_connector_state *old_conn_state)
+{
+	/*
+	 * old_crtc_state and old_conn_state are NULL when called from
+	 * DP_MST. The main connector associated with this port is never
+	 * bound to a crtc for MST.
+	 */
+	if (old_crtc_state &&
+	    intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+		intel_ddi_post_disable_hdmi(encoder,
+					    old_crtc_state, old_conn_state);
+	else
+		intel_ddi_post_disable_dp(encoder,
+					  old_crtc_state, old_conn_state);
 }

 void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -2333,7 +2368,8 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 	val &= ~FDI_RX_ENABLE;
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);

-	intel_ddi_post_disable(encoder, old_crtc_state, old_conn_state);
+	intel_disable_ddi_buf(encoder);
+	intel_ddi_clk_disable(encoder);

 	val = I915_READ(FDI_RX_MISC(PIPE_A));
 	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -2349,70 +2385,93 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 }

-static void intel_enable_ddi(struct intel_encoder *intel_encoder,
-			     const struct intel_crtc_state *pipe_config,
-			     const struct drm_connector_state *conn_state)
+static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
+				const struct drm_connector_state *conn_state)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	enum port port = intel_ddi_get_encoder_port(intel_encoder);
-	int type = intel_encoder->type;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = intel_ddi_get_encoder_port(encoder);

-	if (type == INTEL_OUTPUT_HDMI) {
-		struct intel_digital_port *intel_dig_port =
-			enc_to_dig_port(encoder);
-		bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
-		bool scrambling = pipe_config->hdmi_scrambling;
-
-		intel_hdmi_handle_sink_scrambling(intel_encoder,
-						  conn_state->connector,
-						  clock_ratio, scrambling);
-
-		/* In HDMI/DVI mode, the port width, and swing/emphasis values
-		 * are ignored so nothing special needs to be done besides
-		 * enabling the port.
-		 */
-		I915_WRITE(DDI_BUF_CTL(port),
-			   intel_dig_port->saved_port_bits |
-			   DDI_BUF_CTL_ENABLE);
-	} else if (type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
+		intel_dp_stop_link_train(intel_dp);

-		if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
-			intel_dp_stop_link_train(intel_dp);
+	intel_edp_backlight_on(crtc_state, conn_state);
+	intel_psr_enable(intel_dp, crtc_state);
+	intel_edp_drrs_enable(intel_dp, crtc_state);

-		intel_edp_backlight_on(pipe_config, conn_state);
-		intel_psr_enable(intel_dp, pipe_config);
-		intel_edp_drrs_enable(intel_dp, pipe_config);
-	}
+	if (crtc_state->has_audio)
+		intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}

-	if (pipe_config->has_audio)
-		intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
+static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state,
+				  const struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+	enum port port = intel_ddi_get_encoder_port(encoder);
+
+	intel_hdmi_handle_sink_scrambling(encoder,
+					  conn_state->connector,
+					  crtc_state->hdmi_high_tmds_clock_ratio,
+					  crtc_state->hdmi_scrambling);
+
+	/* In HDMI/DVI mode, the port width, and swing/emphasis values
+	 * are ignored so nothing special needs to be done besides
+	 * enabling the port.
+	 */
+	I915_WRITE(DDI_BUF_CTL(port),
+		   dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+
+	if (crtc_state->has_audio)
+		intel_audio_codec_enable(encoder, crtc_state, conn_state);
 }

-static void intel_disable_ddi(struct intel_encoder *intel_encoder,
-			      const struct intel_crtc_state *old_crtc_state,
-			      const struct drm_connector_state *old_conn_state)
+static void intel_enable_ddi(struct intel_encoder *encoder,
+			     const struct intel_crtc_state *crtc_state,
+			     const struct drm_connector_state *conn_state)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-	int type = intel_encoder->type;
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+	else
+		intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+}
+
+static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *old_crtc_state,
+				 const struct drm_connector_state *old_conn_state)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

 	if (old_crtc_state->has_audio)
-		intel_audio_codec_disable(intel_encoder);
+		intel_audio_codec_disable(encoder);

-	if (type == INTEL_OUTPUT_HDMI) {
-		intel_hdmi_handle_sink_scrambling(intel_encoder,
-						  old_conn_state->connector,
-						  false, false);
-	}
+	intel_edp_drrs_disable(intel_dp, old_crtc_state);
+	intel_psr_disable(intel_dp, old_crtc_state);
+	intel_edp_backlight_off(old_conn_state);
+}

-	if (type == INTEL_OUTPUT_EDP) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+				   const struct intel_crtc_state *old_crtc_state,
+				   const struct drm_connector_state *old_conn_state)
+{
+	if (old_crtc_state->has_audio)
+		intel_audio_codec_disable(encoder);

-		intel_edp_drrs_disable(intel_dp, old_crtc_state);
-		intel_psr_disable(intel_dp, old_crtc_state);
-		intel_edp_backlight_off(old_conn_state);
-	}
+	intel_hdmi_handle_sink_scrambling(encoder,
+					  old_conn_state->connector,
+					  false, false);
+}
+
+static void intel_disable_ddi(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *old_crtc_state,
+			      const struct drm_connector_state *old_conn_state)
+{
+	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+		intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+	else
+		intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
 }

 static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,

+ 79 - 127
drivers/gpu/drm/i915/intel_display.c

@@ -2847,7 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 
 
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
 			fb = c->primary->fb;
 			fb = c->primary->fb;
-			drm_framebuffer_reference(fb);
+			drm_framebuffer_get(fb);
 			goto valid_fb;
 			goto valid_fb;
 		}
 		}
 	}
 	}
@@ -2878,7 +2878,7 @@ valid_fb:
 			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

 		intel_state->vma = NULL;
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 		return;
 	}

@@ -2899,7 +2899,7 @@ valid_fb:
 	if (i915_gem_object_is_tiled(obj))
 		dev_priv->preserve_bios_swizzle = true;

-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 	primary->fb = primary->state->fb = fb;
 	primary->crtc = primary->state->crtc = &intel_crtc->base;

@@ -3289,7 +3289,6 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
 				      const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum plane plane = primary->plane;
 	u32 linear_offset;
@@ -3298,16 +3297,14 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
 	int x = plane_state->main.x;
 	int y = plane_state->main.y;
 	unsigned long irqflags;
+	u32 dspaddr_offset;

 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

 	if (INTEL_GEN(dev_priv) >= 4)
-		crtc->dspaddr_offset = plane_state->main.offset;
+		dspaddr_offset = plane_state->main.offset;
 	else
-		crtc->dspaddr_offset = linear_offset;
-
-	crtc->adjusted_x = x;
-	crtc->adjusted_y = y;
+		dspaddr_offset = linear_offset;

 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

@@ -3333,18 +3330,18 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		I915_WRITE_FW(DSPSURF(plane),
 			      intel_plane_ggtt_offset(plane_state) +
-			      crtc->dspaddr_offset);
+			      dspaddr_offset);
 		I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
 	} else if (INTEL_GEN(dev_priv) >= 4) {
 		I915_WRITE_FW(DSPSURF(plane),
 			      intel_plane_ggtt_offset(plane_state) +
-			      crtc->dspaddr_offset);
+			      dspaddr_offset);
 		I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
 	} else {
 		I915_WRITE_FW(DSPADDR(plane),
 			      intel_plane_ggtt_offset(plane_state) +
-			      crtc->dspaddr_offset);
+			      dspaddr_offset);
 	}
 	POSTING_READ_FW(reg);

@@ -3544,100 +3541,6 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
 	return plane_ctl;
 }

-static void skylake_update_primary_plane(struct intel_plane *plane,
-					 const struct intel_crtc_state *crtc_state,
-					 const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
-	enum plane_id plane_id = plane->id;
-	enum pipe pipe = plane->pipe;
-	u32 plane_ctl = plane_state->ctl;
-	unsigned int rotation = plane_state->base.rotation;
-	u32 stride = skl_plane_stride(fb, 0, rotation);
-	u32 aux_stride = skl_plane_stride(fb, 1, rotation);
-	u32 surf_addr = plane_state->main.offset;
-	int scaler_id = plane_state->scaler_id;
-	int src_x = plane_state->main.x;
-	int src_y = plane_state->main.y;
-	int src_w = drm_rect_width(&plane_state->base.src) >> 16;
-	int src_h = drm_rect_height(&plane_state->base.src) >> 16;
-	int dst_x = plane_state->base.dst.x1;
-	int dst_y = plane_state->base.dst.y1;
-	int dst_w = drm_rect_width(&plane_state->base.dst);
-	int dst_h = drm_rect_height(&plane_state->base.dst);
-	unsigned long irqflags;
-
-	/* Sizes are 0 based */
-	src_w--;
-	src_h--;
-	dst_w--;
-	dst_h--;
-
-	crtc->dspaddr_offset = surf_addr;
-
-	crtc->adjusted_x = src_x;
-	crtc->adjusted_y = src_y;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
-		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
-			      PLANE_COLOR_PIPE_GAMMA_ENABLE |
-			      PLANE_COLOR_PIPE_CSC_ENABLE |
-			      PLANE_COLOR_PLANE_GAMMA_DISABLE);
-	}
-
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
-	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
-	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
-	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
-	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
-		      (plane_state->aux.offset - surf_addr) | aux_stride);
-	I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-		      (plane_state->aux.y << 16) | plane_state->aux.x);
-
-	if (scaler_id >= 0) {
-		uint32_t ps_ctrl = 0;
-
-		WARN_ON(!dst_w || !dst_h);
-		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
-			crtc_state->scaler_state.scalers[scaler_id].mode;
-		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
-		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
-		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
-	} else {
-		I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
-	}
-
-	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
-		      intel_plane_ggtt_offset(plane_state) + surf_addr);
-
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void skylake_disable_primary_plane(struct intel_plane *primary,
-					  struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
-	enum plane_id plane_id = primary->id;
-	enum pipe pipe = primary->pipe;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
-	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
-	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
 static int
 __intel_display_resume(struct drm_device *dev,
 		       struct drm_atomic_state *state,
@@ -6139,6 +6042,19 @@ struct intel_connector *intel_connector_alloc(void)
 	return connector;
 }

+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+	kfree(to_intel_digital_connector_state(connector->base.state));
+	kfree(connector);
+}
+
 /* Simple connector->get_hw_state implementation for encoders that support only
  * one connector and no cloning and hence the encoder state determines the state
  * of the connector. */
@@ -6522,11 +6438,9 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,

 	crtc_state->dpll_hw_state.fp0 = fp;

-	crtc->lowfreq_avail = false;
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 	    reduced_clock) {
 		crtc_state->dpll_hw_state.fp1 = fp2;
-		crtc->lowfreq_avail = true;
 	} else {
 		crtc_state->dpll_hw_state.fp1 = fp;
 	}
@@ -7221,15 +7135,6 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 		}
 	}

-	if (HAS_PIPE_CXSR(dev_priv)) {
-		if (intel_crtc->lowfreq_avail) {
-			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
-			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
-		} else {
-			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
-		}
-	}
-
 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		if (INTEL_GEN(dev_priv) < 4 ||
 		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
@@ -8365,8 +8270,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));

-	crtc->lowfreq_avail = false;
-
 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 	if (!crtc_state->has_pch_encoder)
 		return 0;
@@ -9025,8 +8928,6 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 		}
 	}

-	crtc->lowfreq_avail = false;
-
 	return 0;
 }

@@ -9846,7 +9747,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
 		return NULL;

-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 	return fb;
 #else
 	return NULL;
@@ -10027,7 +9928,7 @@ found:
 	if (ret)
 		goto fail;

-	drm_framebuffer_unreference(fb);
+	drm_framebuffer_put(fb);

 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
 	if (ret)
@@ -10662,6 +10563,52 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
 		      m_n->link_m, m_n->link_n, m_n->tu);
 }

+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+	OUTPUT_TYPE(UNUSED),
+	OUTPUT_TYPE(ANALOG),
+	OUTPUT_TYPE(DVO),
+	OUTPUT_TYPE(SDVO),
+	OUTPUT_TYPE(LVDS),
+	OUTPUT_TYPE(TVOUT),
+	OUTPUT_TYPE(HDMI),
+	OUTPUT_TYPE(DP),
+	OUTPUT_TYPE(EDP),
+	OUTPUT_TYPE(DSI),
+	OUTPUT_TYPE(UNKNOWN),
+	OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+				  unsigned int output_types)
+{
+	char *str = buf;
+	int i;
+
+	str[0] = '\0';
+
+	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+		int r;
+
+		if ((output_types & BIT(i)) == 0)
+			continue;
+
+		r = snprintf(str, len, "%s%s",
+			     str != buf ? "," : "", output_type_str[i]);
+		if (r >= len)
+			break;
+		str += r;
+		len -= r;
+
+		output_types &= ~BIT(i);
+	}
+
+	WARN_ON_ONCE(output_types != 0);
+}
+
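For example (hypothetical values), a crtc driving both an HDMI and a DP output would be dumped as "HDMI,DP"; the helper walks output_type_str in order and stops safely once the buffer is full:

	char buf[64];

	snprintf_output_types(buf, sizeof(buf),
			      BIT(INTEL_OUTPUT_HDMI) | BIT(INTEL_OUTPUT_DP));
	/* buf now holds "HDMI,DP" */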
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config,
 				   const char *context)
@@ -10672,10 +10619,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *state;
 	struct drm_framebuffer *fb;
+	char buf[64];

 	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
 		      crtc->base.base.id, crtc->base.name, context);

+	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
+		      buf, pipe_config->output_types);
+
 	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
 		      transcoder_name(pipe_config->cpu_transcoder),
 		      pipe_config->pipe_bpp, pipe_config->dither);
@@ -13229,8 +13181,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 		num_formats = ARRAY_SIZE(skl_primary_formats);
 		modifiers = skl_format_modifiers_ccs;

-		primary->update_plane = skylake_update_primary_plane;
-		primary->disable_plane = skylake_disable_primary_plane;
+		primary->update_plane = skl_update_plane;
+		primary->disable_plane = skl_disable_plane;
 	} else if (INTEL_GEN(dev_priv) >= 9) {
 		intel_primary_formats = skl_primary_formats;
 		num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13239,8 +13191,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 		else
 			modifiers = skl_format_modifiers_noccs;

-		primary->update_plane = skylake_update_primary_plane;
-		primary->disable_plane = skylake_disable_primary_plane;
+		primary->update_plane = skl_update_plane;
+		primary->disable_plane = skl_disable_plane;
 	} else if (INTEL_GEN(dev_priv) >= 4) {
 		intel_primary_formats = i965_primary_formats;
 		num_formats = ARRAY_SIZE(i965_primary_formats);

+ 5 - 5
drivers/gpu/drm/i915/intel_dp.c

@@ -1007,7 +1007,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 	else
 		precharge = 5;

-	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
+	if (IS_BROADWELL(dev_priv))
 		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 	else
 		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1032,7 +1032,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 	       DP_AUX_CH_CTL_DONE |
 	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
-	       DP_AUX_CH_CTL_TIME_OUT_1600us |
+	       DP_AUX_CH_CTL_TIME_OUT_MAX |
 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
 	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
@@ -1832,6 +1832,8 @@ found:
 	if (!HAS_DDI(dev_priv))
 		intel_dp_set_clock(encoder, pipe_config);

+	intel_psr_compute_config(intel_dp, pipe_config);
+
 	return true;
 }

@@ -3153,9 +3155,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 	enum port port = dp_to_dig_port(intel_dp)->port;

-	if (IS_GEN9_LP(dev_priv))
-		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-	else if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 		return intel_ddi_dp_voltage_max(encoder);
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))

+ 30 - 9
drivers/gpu/drm/i915/intel_dp_mst.c

@@ -454,32 +454,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
-	int i;
+	enum pipe pipe;
+	int ret;

 	intel_connector = intel_connector_alloc();
 	if (!intel_connector)
 		return NULL;

 	connector = &intel_connector->base;
-	drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
+				 DRM_MODE_CONNECTOR_DisplayPort);
+	if (ret) {
+		intel_connector_free(intel_connector);
+		return NULL;
+	}
+
 	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);

 	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
 	intel_connector->mst_port = intel_dp;
 	intel_connector->port = port;

-	for (i = PIPE_A; i <= PIPE_C; i++) {
-		drm_mode_connector_attach_encoder(&intel_connector->base,
-						  &intel_dp->mst_encoders[i]->base.base);
+	for_each_pipe(dev_priv, pipe) {
+		struct drm_encoder *enc =
+			&intel_dp->mst_encoders[pipe]->base.base;
+
+		ret = drm_mode_connector_attach_encoder(&intel_connector->base,
+							enc);
+		if (ret)
+			goto err;
 	}

 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);

-	drm_mode_connector_set_path_property(connector, pathprop);
+	ret = drm_mode_connector_set_path_property(connector, pathprop);
+	if (ret)
+		goto err;
+
 	return connector;
+
+err:
+	drm_connector_cleanup(connector);
+	return NULL;
 }

 static void intel_dp_register_mst_connector(struct drm_connector *connector)
@@ -569,11 +589,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 static bool
 intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
 {
-	int i;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+	enum pipe pipe;

-	for (i = PIPE_A; i <= PIPE_C; i++)
-		intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+	for_each_pipe(dev_priv, pipe)
+		intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
 	return true;
 }
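Unlike the old hardcoded PIPE_A..PIPE_C loops, for_each_pipe() only visits the pipes the device actually exposes; its definition in i915_drv.h is roughly:

	#define for_each_pipe(__dev_priv, __p) \
		for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)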
 
 

+ 11 - 9
drivers/gpu/drm/i915/intel_drv.h

@@ -718,6 +718,9 @@ struct intel_crtc_state {
 	struct intel_link_m_n dp_m2_n2;
 	bool has_drrs;

+	bool has_psr;
+	bool has_psr2;
+
 	/*
 	 * Frequence the dpll for the port should run at. Differs from the
 	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -800,18 +803,10 @@ struct intel_crtc {
 	 * some outputs connected to this crtc.
 	 */
 	bool active;
-	bool lowfreq_avail;
 	u8 plane_ids_mask;
 	unsigned long long enabled_power_domains;
 	struct intel_overlay *overlay;

-	/* Display surface base address adjustement for pageflips. Note that on
-	 * gen4+ this only adjusts up to a tile, offsets within a tile are
-	 * handled in the hw itself (with the TILEOFF register). */
-	u32 dspaddr_offset;
-	int adjusted_x;
-	int adjusted_y;
-
 	struct intel_crtc_state *config;

 	/* global reset count when the last flip was submitted */
@@ -1066,7 +1061,7 @@ struct intel_digital_port {

 	void (*write_infoframe)(struct drm_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
-				enum hdmi_infoframe_type type,
+				unsigned int type,
 				const void *frame, ssize_t len);
 	void (*set_infoframes)(struct drm_encoder *encoder,
 			       bool enable,
@@ -1360,6 +1355,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
 void intel_connector_attach_encoder(struct intel_connector *connector,
 				    struct intel_encoder *encoder);
@@ -1764,6 +1760,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 void intel_psr_init(struct drm_i915_private *dev_priv);
 void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+			      struct intel_crtc_state *crtc_state);

 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -1923,6 +1921,10 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void skl_update_plane(struct intel_plane *plane,
+		      const struct intel_crtc_state *crtc_state,
+		      const struct intel_plane_state *plane_state);
+void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);

 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);

+ 4 - 33
drivers/gpu/drm/i915/intel_dsi.c

@@ -1751,42 +1751,13 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 	else
 		intel_encoder->crtc_mask = BIT(PIPE_B);

-	if (dev_priv->vbt.dsi.config->dual_link) {
+	if (dev_priv->vbt.dsi.config->dual_link)
 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-
-		switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
-		case DL_DCS_PORT_A:
-			intel_dsi->dcs_backlight_ports = BIT(PORT_A);
-			break;
-		case DL_DCS_PORT_C:
-			intel_dsi->dcs_backlight_ports = BIT(PORT_C);
-			break;
-		default:
-		case DL_DCS_PORT_A_AND_C:
-			intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
-			break;
-		}
-
-		switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
-		case DL_DCS_PORT_A:
-			intel_dsi->dcs_cabc_ports = BIT(PORT_A);
-			break;
-		case DL_DCS_PORT_C:
-			intel_dsi->dcs_cabc_ports = BIT(PORT_C);
-			break;
-		default:
-		case DL_DCS_PORT_A_AND_C:
-			intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
-			break;
-		}
-	} else {
+	else
 		intel_dsi->ports = BIT(port);
-		intel_dsi->dcs_backlight_ports = BIT(port);
-		intel_dsi->dcs_cabc_ports = BIT(port);
-	}

-	if (!dev_priv->vbt.dsi.config->cabc_supported)
-		intel_dsi->dcs_cabc_ports = 0;
+	intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+	intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;

 	/* Create a DSI host (and a device) for each port. */
 	for_each_dsi_port(port, intel_dsi->ports) {

+ 24 - 20
drivers/gpu/drm/i915/intel_engine_cs.c

@@ -25,6 +25,7 @@
 #include <drm/drm_print.h>

 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"

@@ -386,10 +387,6 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)

 static bool csb_force_mmio(struct drm_i915_private *i915)
 {
-	/* GVT emulation depends upon intercepting CSB mmio */
-	if (intel_vgpu_active(i915))
-		return true;
-
 	/*
 	 * IOMMU adds unpredictable latency causing the CSB write (from the
 	 * GPU into the HWSP) to only be visible some time after the interrupt
@@ -398,6 +395,10 @@ static bool csb_force_mmio(struct drm_i915_private *i915)
 	if (intel_vtd_active())
 		return true;

+	/* Older GVT emulation depends upon intercepting CSB mmio */
+	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
+		return true;
+
 	return false;
 }
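intel_vgpu_has_hwsp_emulation() is expected to reduce to a capability check against the GVT-g PV info page (see the i915_vgpu.h and i915_pvinfo.h hunks in this pull); a sketch of the assumed shape:

	static inline bool
	intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
	{
		/* set by GVT-g hosts new enough to emulate the CSB via the HWSP */
		return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
	}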
 
 
@@ -1625,8 +1626,10 @@ static void print_request(struct drm_printer *m,
 			  struct drm_i915_gem_request *rq,
 			  const char *prefix)
 {
-	drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
-		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+		   rq->global_seqno,
+		   i915_gem_request_completed(rq) ? "!" : "",
+		   rq->ctx->hw_id, rq->fence.seqno,
 		   rq->priotree.priority,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   rq->timeline->common->name);
@@ -1634,8 +1637,9 @@ static void print_request(struct drm_printer *m,

 void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 {
-	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	struct i915_gpu_error *error = &engine->i915->gpu_error;
+	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+	const struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_request *rq;
 	struct rb_node *rb;
@@ -1699,7 +1703,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 
 
 	if (i915_modparams.enable_execlists) {
 	if (i915_modparams.enable_execlists) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-		struct intel_engine_execlists * const execlists = &engine->execlists;
 		u32 ptr, read, write;
 		unsigned int idx;
 
@@ -1747,17 +1750,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 			}
 		}
 		rcu_read_unlock();
-
-		spin_lock_irq(&engine->timeline->lock);
-		for (rb = execlists->first; rb; rb = rb_next(rb)) {
-			struct i915_priolist *p =
-				rb_entry(rb, typeof(*p), node);
-
-			list_for_each_entry(rq, &p->requests,
-					    priotree.link)
-				print_request(m, rq, "\t\tQ ");
-		}
-		spin_unlock_irq(&engine->timeline->lock);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   I915_READ(RING_PP_DIR_BASE(engine)));
@@ -1767,6 +1759,18 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
 
+	spin_lock_irq(&engine->timeline->lock);
+	list_for_each_entry(rq, &engine->timeline->requests, link)
+		print_request(m, rq, "\t\tE ");
+	for (rb = execlists->first; rb; rb = rb_next(rb)) {
+		struct i915_priolist *p =
+			rb_entry(rb, typeof(*p), node);
+
+		list_for_each_entry(rq, &p->requests, priotree.link)
+			print_request(m, rq, "\t\tQ ");
+	}
+	spin_unlock_irq(&engine->timeline->lock);
+
 	spin_lock_irq(&b->rb_lock);
 	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
 		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

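The intel_engine_cs.c hunks above reorder csb_force_mmio(): the VT-d check comes first, and falling back to MMIO for a vGPU guest is now conditional on the host lacking HWSP emulation. A minimal standalone sketch of that decision, assuming the two capability flags below stand in for intel_vtd_active() and the new intel_vgpu_has_hwsp_emulation() bit (the struct layout is invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct i915_model {
	bool vtd_active;	/* IOMMU on: HWSP write may lag the interrupt */
	bool vgpu_active;	/* running as a GVT-g guest */
	bool vgpu_has_hwsp;	/* host emulates the CSB in the HWSP */
};

static bool csb_force_mmio(const struct i915_model *i915)
{
	/* IOMMU latency makes the HWSP copy unreliable; read CSB via MMIO */
	if (i915->vtd_active)
		return true;

	/* older GVT hosts only intercept the CSB MMIO registers */
	if (i915->vgpu_active && !i915->vgpu_has_hwsp)
		return true;

	return false;
}

int main(void)
{
	struct i915_model guest = { .vgpu_active = true, .vgpu_has_hwsp = true };

	/* a guest with HWSP emulation can now take the fast HWSP path */
	printf("force mmio: %s\n", csb_force_mmio(&guest) ? "yes" : "no");
	return 0;
}
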
+ 8 - 5
drivers/gpu/drm/i915/intel_fbc.c

@@ -69,9 +69,9 @@ static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
  * address we program because it starts at the real start of the buffer, so we
  * have to take this into consideration here.
  */
-static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
+static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
 {
-	return crtc->base.y - crtc->adjusted_y;
+	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
 }
 
 /*
@@ -727,8 +727,8 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 
 	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
 					&effective_h);
-	effective_w += crtc->adjusted_x;
-	effective_h += crtc->adjusted_y;
+	effective_w += fbc->state_cache.plane.adjusted_x;
+	effective_h += fbc->state_cache.plane.adjusted_y;
 
 	return effective_w <= max_w && effective_h <= max_h;
 }
@@ -757,6 +757,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
 	cache->plane.visible = plane_state->base.visible;
+	cache->plane.adjusted_x = plane_state->main.x;
+	cache->plane.adjusted_y = plane_state->main.y;
+	cache->plane.y = plane_state->base.src.y1 >> 16;
 
 	if (!cache->plane.visible)
 		return;
@@ -888,7 +891,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 
 	params->crtc.pipe = crtc->pipe;
 	params->crtc.plane = crtc->plane;
-	params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
 
 	params->fb.format = cache->fb.format;
 	params->fb.stride = cache->fb.stride;

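The intel_fbc.c change above makes FBC derive the fence Y offset from its own state cache (filled in during atomic check) instead of reaching back into the CRTC at commit time. A toy model of the new computation; the struct mirrors only the three fields the diff adds, everything else is made up:

#include <stdio.h>

struct fbc_state_cache {
	struct {
		int adjusted_x, adjusted_y;	/* plane_state->main.x/y in the diff */
		int y;				/* plane_state->base.src.y1 >> 16 */
	} plane;
};

static unsigned int get_crtc_fence_y_offset(const struct fbc_state_cache *cache)
{
	/* the fence starts at the real buffer start; compensate for the offset */
	return cache->plane.y - cache->plane.adjusted_y;
}

int main(void)
{
	struct fbc_state_cache cache = { .plane = { .y = 64, .adjusted_y = 16 } };

	printf("fence_y_offset = %u\n", get_crtc_fence_y_offset(&cache));
	return 0;
}
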
+ 2 - 2
drivers/gpu/drm/i915/intel_fbdev.c

@@ -189,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 			      " releasing it\n",
 			      intel_fb->base.width, intel_fb->base.height,
 			      sizes->fb_width, sizes->fb_height);
-		drm_framebuffer_unreference(&intel_fb->base);
+		drm_framebuffer_put(&intel_fb->base);
 		intel_fb = ifbdev->fb = NULL;
 	}
 	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
@@ -627,7 +627,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 	ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
 	ifbdev->fb = fb;
 
-	drm_framebuffer_reference(&ifbdev->fb->base);
+	drm_framebuffer_get(&ifbdev->fb->base);
 
 	/* Final pass to check if any active pipes don't have fbs */
 	for_each_crtc(dev, crtc) {

+ 104 - 0
drivers/gpu/drm/i915/intel_guc.c

@@ -67,6 +67,99 @@ void intel_guc_init_early(struct intel_guc *guc)
 	guc->notify = gen8_guc_raise_irq;
 }
 
+static u32 get_gt_type(struct drm_i915_private *dev_priv)
+{
+	/* XXX: GT type based on PCI device ID? field seems unused by fw */
+	return 0;
+}
+
+static u32 get_core_family(struct drm_i915_private *dev_priv)
+{
+	u32 gen = INTEL_GEN(dev_priv);
+
+	switch (gen) {
+	case 9:
+		return GUC_CORE_FAMILY_GEN9;
+
+	default:
+		MISSING_CASE(gen);
+		return GUC_CORE_FAMILY_UNKNOWN;
+	}
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+void intel_guc_init_params(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 params[GUC_CTL_MAX_DWORDS];
+	int i;
+
+	memset(params, 0, sizeof(params));
+
+	params[GUC_CTL_DEVICE_INFO] |=
+		(get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
+		(get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
+
+	/*
+	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
+	 * second. This ARAT is calculated by:
+	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
+	 */
+	params[GUC_CTL_ARAT_HIGH] = 0;
+	params[GUC_CTL_ARAT_LOW] = 100000000;
+
+	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
+
+	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
+			GUC_CTL_VCS2_ENABLED;
+
+	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
+	if (i915_modparams.guc_log_level >= 0) {
+		params[GUC_CTL_DEBUG] =
+			i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+	} else {
+		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
+	}
+
+	/* If GuC submission is enabled, set up additional parameters here */
+	if (i915_modparams.enable_guc_submission) {
+		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+
+		pgs >>= PAGE_SHIFT;
+		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
+			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+
+		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+
+		/* Unmask this bit to enable the GuC's internal scheduler */
+		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+	}
+
+	/*
+	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
+	 * they are power context saved so it's ok to release forcewake
+	 * when we are done here and take it again at xfer time.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+
+	I915_WRITE(SOFT_SCRATCH(0), 0);
+
+	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+}
+
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
 {
 	WARN(1, "Unexpected send: action=%#x\n", *action);
@@ -263,3 +356,14 @@ err:
 	i915_gem_object_put(obj);
 	return vma;
 }
+
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+	u32 wopcm_size = GUC_WOPCM_TOP;
+
+	/* On BXT, the top of WOPCM is reserved for RC6 context */
+	if (IS_GEN9_LP(dev_priv))
+		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+	return wopcm_size;
+}

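The ARAT comment in the intel_guc.c addition above encodes one arithmetic step: a one-second scheduler quantum expressed in 10 ns ARAT increments. A quick standalone check of that division (constant names are local to this example):

#include <stdio.h>

int main(void)
{
	const unsigned long quantum_ns = 1000000000UL;	/* 1 second */
	const unsigned long arat_step_ns = 10UL;	/* ARAT increment */

	/* 1000000000 / 10 = 100000000, the GUC_CTL_ARAT_LOW value in the diff */
	printf("ARAT_LOW = %lu\n", quantum_ns / arat_step_ns);
	return 0;
}
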
+ 13 - 3
drivers/gpu/drm/i915/intel_guc.h

@@ -26,6 +26,7 @@
 #define _INTEL_GUC_H_
 
 #include "intel_uncore.h"
+#include "intel_guc_fw.h"
 #include "intel_guc_fwif.h"
 #include "intel_guc_ct.h"
 #include "intel_guc_log.h"
@@ -33,6 +34,11 @@
 #include "i915_guc_reg.h"
 #include "i915_vma.h"
 
+/*
+ * Top level structure of GuC. It handles firmware loading and manages client
+ * pool and doorbells. intel_guc owns an i915_guc_client to replace the legacy
+ * ExecList submission.
+ */
 struct intel_guc {
 	struct intel_uc_fw fw;
 	struct intel_guc_log log;
@@ -83,6 +89,12 @@ static inline void intel_guc_notify(struct intel_guc *guc)
 	guc->notify(guc);
 }
 
+/*
+ * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
+ * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
+ * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
+ * used by GuC are pinned with PIN_OFFSET_BIAS along with the size of WOPCM.
+ */
 static inline u32 guc_ggtt_offset(struct i915_vma *vma)
 {
 	u32 offset = i915_ggtt_offset(vma);
@@ -95,6 +107,7 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
 
 void intel_guc_init_early(struct intel_guc *guc);
 void intel_guc_init_send_regs(struct intel_guc *guc);
+void intel_guc_init_params(struct intel_guc *guc);
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -102,9 +115,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 int intel_guc_suspend(struct drm_i915_private *dev_priv);
 int intel_guc_resume(struct drm_i915_private *dev_priv);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-
-int intel_guc_select_fw(struct intel_guc *guc);
-int intel_guc_init_hw(struct intel_guc *guc);
 u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 #endif

+ 42 - 208
drivers/gpu/drm/i915/intel_guc_loader.c → drivers/gpu/drm/i915/intel_guc_fw.c

@@ -26,31 +26,9 @@
  *    Dave Gordon <david.s.gordon@intel.com>
  *    Alex Dai <yu.dai@intel.com>
  */
-#include "i915_drv.h"
-#include "intel_uc.h"
 
-/**
- * DOC: GuC-specific firmware loader
- *
- * intel_guc:
- * Top level structure of guc. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
- * ExecList submission.
- *
- * Firmware versioning:
- * The firmware build process will generate a version header file with major and
- * minor version defined. The versions are built into CSS header of firmware.
- * i915 kernel driver set the minimal firmware version required per platform.
- * The firmware installation package will install (symbolic link) proper version
- * of firmware.
- *
- * GuC address space:
- * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
- * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
- * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
- * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
- *
- */
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
 
 #define SKL_FW_MAJOR 6
 #define SKL_FW_MINOR 1
@@ -78,88 +56,45 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
 #define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
 
-
-static u32 get_gttype(struct drm_i915_private *dev_priv)
-{
-	/* XXX: GT type based on PCI device ID? field seems unused by fw */
-	return 0;
-}
-
-static u32 get_core_family(struct drm_i915_private *dev_priv)
-{
-	u32 gen = INTEL_GEN(dev_priv);
-
-	switch (gen) {
-	case 9:
-		return GUC_CORE_FAMILY_GEN9;
-
-	default:
-		MISSING_CASE(gen);
-		return GUC_CORE_FAMILY_UNKNOWN;
-	}
-}
-
-/*
- * Initialise the GuC parameter block before starting the firmware
- * transfer. These parameters are read by the firmware on startup
- * and cannot be changed thereafter.
+/**
+ * intel_guc_fw_select() - selects GuC firmware for uploading
+ *
+ * @guc:	intel_guc struct
+ *
+ * Return: zero when we know firmware, non-zero in other case
  */
-static void guc_params_init(struct drm_i915_private *dev_priv)
+int intel_guc_fw_select(struct intel_guc *guc)
 {
-	struct intel_guc *guc = &dev_priv->guc;
-	u32 params[GUC_CTL_MAX_DWORDS];
-	int i;
-
-	memset(&params, 0, sizeof(params));
-
-	params[GUC_CTL_DEVICE_INFO] |=
-		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
-		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
-
-	/*
-	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
-	 * second. This ARAR is calculated by:
-	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
-	 */
-	params[GUC_CTL_ARAT_HIGH] = 0;
-	params[GUC_CTL_ARAT_LOW] = 100000000;
-
-	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
-
-	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
-			GUC_CTL_VCS2_ENABLED;
-
-	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
-	if (i915_modparams.guc_log_level >= 0) {
-		params[GUC_CTL_DEBUG] =
-			i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
-	} else
-		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
-
-	/* If GuC submission is enabled, set up additional parameters here */
-	if (i915_modparams.enable_guc_submission) {
-		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
-		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
-		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
-		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
-		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
-		pgs >>= PAGE_SHIFT;
-		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
-			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
-		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+	intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
 
-		/* Unmask this bit to enable the GuC's internal scheduler */
-		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+	if (i915_modparams.guc_firmware_path) {
+		guc->fw.path = i915_modparams.guc_firmware_path;
+		guc->fw.major_ver_wanted = 0;
+		guc->fw.minor_ver_wanted = 0;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		guc->fw.path = I915_SKL_GUC_UCODE;
+		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
+		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+	} else if (IS_BROXTON(dev_priv)) {
+		guc->fw.path = I915_BXT_GUC_UCODE;
+		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
+		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+		guc->fw.path = I915_KBL_GUC_UCODE;
+		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
+		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+	} else if (IS_GEMINILAKE(dev_priv)) {
+		guc->fw.path = I915_GLK_GUC_UCODE;
+		guc->fw.major_ver_wanted = GLK_FW_MAJOR;
+		guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+	} else {
+		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
+		return -ENOENT;
 	}
 	}
 
-
-	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
-		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+	return 0;
 }
 }
 
 /*
 	return ret;
 	return ret;
 }
 
-{
-	u32 wopcm_size = GUC_WOPCM_TOP;
-
-	/* On BXT, the top of WOPCM is reserved for RC6 context */
-	if (IS_GEN9_LP(dev_priv))
-		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
-
-	return wopcm_size;
-}
-
 /*
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
+static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 {
 {
-	struct i915_vma *vma;
+	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	int ret;
 	int ret;
 
-	if (ret) {
-		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
-		return ret;
-	}
-
-	vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-	if (IS_ERR(vma)) {
-		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
-		return PTR_ERR(vma);
-	}
+	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -312,23 +225,15 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
 	}
 
-	guc_params_init(dev_priv);
-
 	ret = guc_ucode_xfer_dma(dev_priv, vma);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
-	/*
-	 * We keep the object pages for reuse during resume. But we can unpin it
-	 * now that DMA has completed, so it doesn't continue to take up space.
-	 */
-	i915_vma_unpin(vma);
-
 	return ret;
 }
 
 /**
- * intel_guc_init_hw() - finish preparing the GuC for activity
+ * intel_guc_fw_upload() - finish preparing the GuC for activity
  * @guc: intel_guc structure
  *
  * Called during driver loading and also after a GPU reset.
@@ -340,78 +245,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
  *
  * Return:	non-zero code on error
  */
-int intel_guc_init_hw(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	const char *fw_path = guc->fw.path;
-	int ret;
-
-	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
-		fw_path,
-		intel_uc_fw_status_repr(guc->fw.fetch_status),
-		intel_uc_fw_status_repr(guc->fw.load_status));
-
-	if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
-		return -EIO;
-
-	guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
-
-	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
-		intel_uc_fw_status_repr(guc->fw.fetch_status),
-		intel_uc_fw_status_repr(guc->fw.load_status));
-
-	ret = guc_ucode_xfer(dev_priv);
-
-	if (ret)
-		return -EAGAIN;
-
-	guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
-
-	DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
-		 i915_modparams.enable_guc_submission ? "submission enabled" :
-							"loaded",
-		 guc->fw.path,
-		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
-
-	return 0;
-}
-
-/**
- * intel_guc_select_fw() - selects GuC firmware for loading
- * @guc:	intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_select_fw(struct intel_guc *guc)
+int intel_guc_fw_upload(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
-	intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
-
-	if (i915_modparams.guc_firmware_path) {
-		guc->fw.path = i915_modparams.guc_firmware_path;
-		guc->fw.major_ver_wanted = 0;
-		guc->fw.minor_ver_wanted = 0;
-	} else if (IS_SKYLAKE(dev_priv)) {
-		guc->fw.path = I915_SKL_GUC_UCODE;
-		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
-		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
-	} else if (IS_BROXTON(dev_priv)) {
-		guc->fw.path = I915_BXT_GUC_UCODE;
-		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
-		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
-	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-		guc->fw.path = I915_KBL_GUC_UCODE;
-		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
-		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		guc->fw.path = I915_GLK_GUC_UCODE;
-		guc->fw.major_ver_wanted = GLK_FW_MAJOR;
-		guc->fw.minor_ver_wanted = GLK_FW_MINOR;
-	} else {
-		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
-		return -ENOENT;
-	}
-
-	return 0;
+	return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
 }

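The intel_guc_fw.c rename above also splits firmware selection (intel_guc_fw_select) from upload. A standalone rendering of the selection table: a module-parameter override wins and disables the version check, otherwise the platform picks a path plus the minimum wanted version. The SKL 6.1 numbers come from the diff; the other paths and versions here are illustrative placeholders, not the driver's real values:

#include <stdio.h>

enum platform { SKL, BXT, KBL, GLK, NPLATFORMS };

struct fw_choice {
	const char *path;
	int major, minor;
};

static int select_fw(enum platform p, const char *override,
		     struct fw_choice *out)
{
	static const struct fw_choice table[NPLATFORMS] = {
		[SKL] = { "i915/skl_guc.bin", 6, 1 },
		[BXT] = { "i915/bxt_guc.bin", 8, 7 },
		[KBL] = { "i915/kbl_guc.bin", 9, 14 },
		[GLK] = { "i915/glk_guc.bin", 9, 98 },
	};

	if (override) {
		/* user-supplied blob: wanted version 0.0 skips the check */
		out->path = override;
		out->major = out->minor = 0;
		return 0;
	}
	if (p < 0 || p >= NPLATFORMS)
		return -2;	/* -ENOENT in the driver */
	*out = table[p];
	return 0;
}

int main(void)
{
	struct fw_choice fw;

	if (select_fw(KBL, NULL, &fw) == 0)
		printf("%s (want >= %d.%d)\n", fw.path, fw.major, fw.minor);
	return 0;
}
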
+ 33 - 0
drivers/gpu/drm/i915/intel_guc_fw.h

@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_FW_H_
+#define _INTEL_GUC_FW_H_
+
+struct intel_guc;
+
+int intel_guc_fw_select(struct intel_guc *guc);
+int intel_guc_fw_upload(struct intel_guc *guc);
+
+#endif

+ 2 - 2
drivers/gpu/drm/i915/intel_guc_fwif.h

@@ -82,8 +82,8 @@
 #define GUC_CTL_ARAT_LOW		2
 
 #define GUC_CTL_DEVICE_INFO		3
-#define   GUC_CTL_GTTYPE_SHIFT		0
-#define   GUC_CTL_COREFAMILY_SHIFT	7
+#define   GUC_CTL_GT_TYPE_SHIFT		0
+#define   GUC_CTL_CORE_FAMILY_SHIFT	7
 
 #define GUC_CTL_LOG_PARAMS		4
 #define   GUC_LOG_VALID			(1 << 0)

+ 16 - 10
drivers/gpu/drm/i915/intel_hdmi.c

@@ -70,7 +70,7 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }
 
-static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
+static u32 g4x_infoframe_index(unsigned int type)
 {
 	switch (type) {
 	case HDMI_INFOFRAME_TYPE_AVI:
@@ -85,7 +85,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
 	}
 }
 
-static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
+static u32 g4x_infoframe_enable(unsigned int type)
 {
 	switch (type) {
 	case HDMI_INFOFRAME_TYPE_AVI:
@@ -100,9 +100,11 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
 	}
 }
 
-static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
+static u32 hsw_infoframe_enable(unsigned int type)
 {
 	switch (type) {
+	case DP_SDP_VSC:
+		return VIDEO_DIP_ENABLE_VSC_HSW;
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return VIDEO_DIP_ENABLE_AVI_HSW;
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -118,10 +120,12 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
 static i915_reg_t
 hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 		 enum transcoder cpu_transcoder,
-		 enum hdmi_infoframe_type type,
+		 unsigned int type,
 		 int i)
 {
 	switch (type) {
+	case DP_SDP_VSC:
+		return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
 	case HDMI_INFOFRAME_TYPE_AVI:
 		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
 	case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,7 +140,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 
 static void g4x_write_infoframe(struct drm_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
-				enum hdmi_infoframe_type type,
+				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
@@ -191,7 +195,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
-				enum hdmi_infoframe_type type,
+				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
@@ -251,7 +255,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
-				enum hdmi_infoframe_type type,
+				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
@@ -309,7 +313,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
-				enum hdmi_infoframe_type type,
+				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
@@ -368,7 +372,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
 				const struct intel_crtc_state *crtc_state,
-				enum hdmi_infoframe_type type,
+				unsigned int type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
@@ -377,6 +381,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	i915_reg_t data_reg;
+	int data_size = type == DP_SDP_VSC ?
+		VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
 	int i;
 	u32 val = I915_READ(ctl_reg);
 
@@ -392,7 +398,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 		data++;
 	}
 	/* Write every possible data byte to force correct ECC calculation. */
-	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+	for (; i < data_size; i += 4)
 		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
 					    type, i >> 2), 0);
 	mmiowb();

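The intel_hdmi.c hunks above widen the infoframe type parameter from enum hdmi_infoframe_type to unsigned int so DP SDP packet types can reuse the same DIP write path, and hsw_write_infoframe() picks a larger buffer for the VSC packet. A compact model of why the widening is needed; the constant values below match my reading of the CEA/DP specs and i915 register headers, but treat them as assumptions of this sketch:

#include <stdio.h>

#define HDMI_INFOFRAME_TYPE_AVI	0x82	/* lives in the HDMI enum */
#define DP_SDP_VSC		0x07	/* DP SDP type, not an HDMI enum member */

#define VIDEO_DIP_DATA_SIZE	32
#define VIDEO_DIP_VSC_DATA_SIZE	36	/* VSC DIP buffer is larger */

static int dip_data_size(unsigned int type)
{
	/* a plain unsigned carries both enum spaces in one parameter */
	return type == DP_SDP_VSC ? VIDEO_DIP_VSC_DATA_SIZE
				  : VIDEO_DIP_DATA_SIZE;
}

int main(void)
{
	printf("AVI: %d bytes, VSC: %d bytes\n",
	       dip_data_size(HDMI_INFOFRAME_TYPE_AVI),
	       dip_data_size(DP_SDP_VSC));
	return 0;
}
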
+ 41 - 84
drivers/gpu/drm/i915/intel_huc.c

@@ -77,6 +77,42 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
 #define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
 	GLK_HUC_FW_MINOR, GLK_BLD_NUM)
 
+/**
+ * intel_huc_select_fw() - selects HuC firmware for loading
+ * @huc:	intel_huc struct
+ */
+void intel_huc_select_fw(struct intel_huc *huc)
+{
+	struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+	intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+
+	if (i915_modparams.huc_firmware_path) {
+		huc->fw.path = i915_modparams.huc_firmware_path;
+		huc->fw.major_ver_wanted = 0;
+		huc->fw.minor_ver_wanted = 0;
+	} else if (IS_SKYLAKE(dev_priv)) {
+		huc->fw.path = I915_SKL_HUC_UCODE;
+		huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
+	} else if (IS_BROXTON(dev_priv)) {
+		huc->fw.path = I915_BXT_HUC_UCODE;
+		huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
+	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+		huc->fw.path = I915_KBL_HUC_UCODE;
+		huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+	} else if (IS_GEMINILAKE(dev_priv)) {
+		huc->fw.path = I915_GLK_HUC_UCODE;
+		huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
+		huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
+	} else {
+		DRM_ERROR("No HuC firmware known for platform with HuC!\n");
+		return;
+	}
+}
+
 /**
  * huc_ucode_xfer() - DMA's the firmware
  * @dev_priv: the drm_i915_private device
@@ -85,26 +121,15 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
  *
  * Return: 0 on success, non-zero on failure
  */
-static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
+static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
 {
-	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
-	struct i915_vma *vma;
+	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+	struct drm_i915_private *dev_priv = huc_to_i915(huc);
 	unsigned long offset = 0;
 	u32 size;
 	int ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
-	if (ret) {
-		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
-		return ret;
-	}
-
-	vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
-				PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
-	if (IS_ERR(vma)) {
-		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
-		return PTR_ERR(vma);
-	}
+	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -135,51 +160,9 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
-	/*
-	 * We keep the object pages for reuse during resume. But we can unpin it
-	 * now that DMA has completed, so it doesn't continue to take up space.
-	 */
-	i915_vma_unpin(vma);
-
 	return ret;
 }
 
-/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc:	intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
-{
-	struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
-	intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
-
-	if (i915_modparams.huc_firmware_path) {
-		huc->fw.path = i915_modparams.huc_firmware_path;
-		huc->fw.major_ver_wanted = 0;
-		huc->fw.minor_ver_wanted = 0;
-	} else if (IS_SKYLAKE(dev_priv)) {
-		huc->fw.path = I915_SKL_HUC_UCODE;
-		huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
-		huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
-	} else if (IS_BROXTON(dev_priv)) {
-		huc->fw.path = I915_BXT_HUC_UCODE;
-		huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
-		huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
-	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
-		huc->fw.path = I915_KBL_HUC_UCODE;
-		huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
-		huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		huc->fw.path = I915_GLK_HUC_UCODE;
-		huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
-		huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
-	} else {
-		DRM_ERROR("No HuC firmware known for platform with HuC!\n");
-		return;
-	}
-}
-
 /**
  * intel_huc_init_hw() - load HuC uCode to device
  * @huc: intel_huc structure
@@ -194,33 +177,7 @@ void intel_huc_select_fw(struct intel_huc *huc)
  */
 void intel_huc_init_hw(struct intel_huc *huc)
 {
-	struct drm_i915_private *dev_priv = huc_to_i915(huc);
-	int err;
-
-	DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
-		huc->fw.path,
-		intel_uc_fw_status_repr(huc->fw.fetch_status),
-		intel_uc_fw_status_repr(huc->fw.load_status));
-
-	if (huc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
-		return;
-
-	huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
-
-	err = huc_ucode_xfer(dev_priv);
-
-	huc->fw.load_status = err ?
-		INTEL_UC_FIRMWARE_FAIL : INTEL_UC_FIRMWARE_SUCCESS;
-
-	DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
-		huc->fw.path,
-		intel_uc_fw_status_repr(huc->fw.fetch_status),
-		intel_uc_fw_status_repr(huc->fw.load_status));
-
-	if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
-		DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
-
-	return;
+	intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
 }
 
 /**

+ 2 - 1
drivers/gpu/drm/i915/intel_lrc.c

@@ -793,7 +793,6 @@ static void intel_lrc_irq_handler(unsigned long data)
 			&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
 		unsigned int head, tail;
 
-		/* However GVT emulation depends upon intercepting CSB mmio */
 		if (unlikely(execlists->csb_use_mmio)) {
 			buf = (u32 * __force)
 				(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
@@ -1094,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
 		i915_ggtt_offset(ce->ring->vma);
 
 	ce->state->obj->mm.dirty = true;
+	ce->state->obj->pin_global++;
 
 	i915_gem_context_get(ctx);
 out:
@@ -1121,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 
 	intel_ring_unpin(ce->ring);
 
+	ce->state->obj->pin_global--;
 	i915_gem_object_unpin_map(ce->state->obj);
 	i915_vma_unpin(ce->state);
 

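The pin_global++/-- pairs added in intel_lrc.c above (and in intel_ringbuffer.c later in this series) keep a plain counter of objects pinned for the device's own use, so the shrinker can tell them apart from transient pins. A stripped-down model of that bookkeeping; no locking here, whereas the real counter is protected by struct_mutex:

#include <assert.h>
#include <stdio.h>

struct obj_model {
	unsigned int pin_global;	/* held while the HW may use the pages */
};

static void context_pin(struct obj_model *obj)
{
	obj->pin_global++;
}

static void context_unpin(struct obj_model *obj)
{
	assert(obj->pin_global > 0);	/* unbalanced unpin is a driver bug */
	obj->pin_global--;
}

int main(void)
{
	struct obj_model ctx_state = { 0 };

	context_pin(&ctx_state);
	printf("pinned globally: %u\n", ctx_state.pin_global);
	context_unpin(&ctx_state);
	printf("pinned globally: %u\n", ctx_state.pin_global);
	return 0;
}
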
+ 15 - 7
drivers/gpu/drm/i915/intel_lspcon.c

@@ -56,7 +56,7 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
 	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
 
 	if (drm_lspcon_get_mode(adapter, &current_mode)) {
-		DRM_ERROR("Error reading LSPCON mode\n");
+		DRM_DEBUG_KMS("Error reading LSPCON mode\n");
 		return DRM_LSPCON_MODE_INVALID;
 	}
 	return current_mode;
@@ -68,16 +68,15 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
 	enum drm_lspcon_mode current_mode;
 
 	current_mode = lspcon_get_current_mode(lspcon);
-	if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
+	if (current_mode == mode)
 		goto out;
 
 	DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
 		      lspcon_mode_name(mode));
 
-	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
-		 current_mode == DRM_LSPCON_MODE_INVALID, 100);
+	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
 	if (current_mode != mode)
-		DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
+		DRM_ERROR("LSPCON mode hasn't settled\n");
 
 out:
 	DRM_DEBUG_KMS("Current LSPCON mode %s\n",
@@ -133,6 +132,7 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
 
 static bool lspcon_probe(struct intel_lspcon *lspcon)
 {
+	int retry;
 	enum drm_dp_dual_mode_type adaptor_type;
 	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
 	enum drm_lspcon_mode expected_mode;
@@ -141,10 +141,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
 			DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
 
 	/* Lets probe the adaptor and check its type */
-	adaptor_type = drm_dp_dual_mode_detect(adapter);
+	for (retry = 0; retry < 6; retry++) {
+		if (retry)
+			usleep_range(500, 1000);
+
+		adaptor_type = drm_dp_dual_mode_detect(adapter);
+		if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
+			break;
+	}
+
 	if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
 		DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
-			drm_dp_get_dual_mode_type_name(adaptor_type));
+			       drm_dp_get_dual_mode_type_name(adaptor_type));
 		return false;
 	}
 

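The intel_lspcon.c probe fix above wraps a flaky I2C detect in a bounded retry: up to six attempts, with a short usleep_range(500, 1000) back-off before every retry after the first. The same pattern standalone, where detect_once() and the result codes are stand-ins for drm_dp_dual_mode_detect():

#include <stdio.h>

enum dual_mode { DUAL_MODE_NONE, DUAL_MODE_LSPCON };

static enum dual_mode detect_once(int attempt)
{
	/* pretend the adaptor only answers from the third attempt on */
	return attempt >= 2 ? DUAL_MODE_LSPCON : DUAL_MODE_NONE;
}

static int probe_lspcon(void)
{
	enum dual_mode type = DUAL_MODE_NONE;
	int retry;

	for (retry = 0; retry < 6; retry++) {
		/* the driver sleeps 500-1000 us between attempts here */
		type = detect_once(retry);
		if (type == DUAL_MODE_LSPCON)
			break;		/* stop as soon as it answers */
	}
	return type == DUAL_MODE_LSPCON ? 0 : -1;
}

int main(void)
{
	printf("lspcon probe: %s\n", probe_lspcon() ? "failed" : "ok");
	return 0;
}
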
+ 9 - 2
drivers/gpu/drm/i915/intel_pm.c

@@ -6591,7 +6591,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	uint32_t rc6_mask = 0;
+	u32 rc6_mode, rc6_mask = 0;
 
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
@@ -6629,8 +6629,15 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
 	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+	/* WaRsUseTimeoutMode:cnl (pre-prod) */
+	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+	else
+		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
 	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
+		   GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);
 
 	/*
 	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.

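The intel_pm.c hunk above picks between two RC6 entry modes: pre-production Cannonlake steppings (A0 through C0) need timeout mode (WaRsUseTimeoutMode), everything else keeps EI mode. A standalone rendering of that choice; the bit values and revid constants are illustrative, not the hardware encoding:

#include <stdio.h>

#define RC_CTL_TO_MODE	(1u << 0)	/* stands in for GEN7_RC_CTL_TO_MODE */
#define RC_CTL_EI_MODE	(1u << 1)	/* stands in for GEN6_RC_CTL_EI_MODE(1) */

static unsigned int pick_rc6_mode(int is_cnl, int revid)
{
	const int cnl_revid_a0 = 0, cnl_revid_c0 = 2;

	/* WaRsUseTimeoutMode:cnl (pre-prod), revid range is inclusive */
	if (is_cnl && revid >= cnl_revid_a0 && revid <= cnl_revid_c0)
		return RC_CTL_TO_MODE;
	return RC_CTL_EI_MODE;
}

int main(void)
{
	printf("CNL B0 -> %#x, SKL -> %#x\n",
	       pick_rc6_mode(1, 1), pick_rc6_mode(0, 0));
	return 0;
}
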
+ 47 - 72
drivers/gpu/drm/i915/intel_psr.c

@@ -58,6 +58,9 @@
 
 static bool is_edp_psr(struct intel_dp *intel_dp)
 {
+	if (!intel_dp_is_edp(intel_dp))
+		return false;
+
 	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
 }
 
@@ -72,37 +75,6 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
 }
 
-static void intel_psr_write_vsc(struct intel_dp *intel_dp,
-				const struct edp_vsc_psr *vsc_psr)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
-	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
-	uint32_t *data = (uint32_t *) vsc_psr;
-	unsigned int i;
-
-	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
-	   the video DIP being updated before program video DIP data buffer
-	   registers for DIP being updated. */
-	I915_WRITE(ctl_reg, 0);
-	POSTING_READ(ctl_reg);
-
-	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
-		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
-						   i >> 2), *data);
-		data++;
-	}
-	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
-		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
-						   i >> 2), 0);
-
-	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
-	POSTING_READ(ctl_reg);
-}
-
 static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
 			      const struct intel_crtc_state *crtc_state)
 {
@@ -149,7 +121,8 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
 		psr_vsc.sdp_header.HB3 = 0x8;
 	}
 
-	intel_psr_write_vsc(intel_dp, &psr_vsc);
+	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
 static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
@@ -376,22 +349,25 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
 		hsw_activate_psr1(intel_dp);
 }
 
-static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+			      struct intel_crtc_state *crtc_state)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	const struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config->base.adjusted_mode;
+		&crtc_state->base.adjusted_mode;
 	int psr_setup_time;
 
-	lockdep_assert_held(&dev_priv->psr.lock);
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+	if (!HAS_PSR(dev_priv))
+		return;
+
+	if (!is_edp_psr(intel_dp))
+		return;
 
-	dev_priv->psr.source_ok = false;
+	if (!i915_modparams.enable_psr) {
+		DRM_DEBUG_KMS("PSR disable by flag\n");
+		return;
+	}
 
 	/*
 	 * HSW spec explicitly says PSR is tied to port A.
@@ -402,66 +378,70 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 	 */
 	if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
 		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
-		return false;
-	}
-
-	if (!i915_modparams.enable_psr) {
-		DRM_DEBUG_KMS("PSR disable by flag\n");
-		return false;
+		return;
 	}
 
 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    !dev_priv->psr.link_standby) {
 		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
-		return false;
+		return;
 	}
 
 	if (IS_HASWELL(dev_priv) &&
-	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
 		      S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-		return false;
+		return;
 	}
 
 	if (IS_HASWELL(dev_priv) &&
 	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-		return false;
+		return;
 	}
 
 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
 	if (psr_setup_time < 0) {
 		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
 			      intel_dp->psr_dpcd[1]);
-		return false;
+		return;
 	}
 
 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
 		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
 			      psr_setup_time);
-		return false;
+		return;
+	}
+
+	/*
+	 * FIXME psr2_support is messed up. It's both computed
+	 * dynamically during PSR enable, and extracted from sink
+	 * caps during eDP detection.
+	 */
+	if (!dev_priv->psr.psr2_support) {
+		crtc_state->has_psr = true;
+		return;
 	}
 
 	/* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
-	if (dev_priv->psr.psr2_support &&
-	    (intel_crtc->config->pipe_src_w > 3200 ||
-	     intel_crtc->config->pipe_src_h > 2000)) {
-		dev_priv->psr.psr2_support = false;
-		return false;
+	if (adjusted_mode->crtc_hdisplay > 3200 ||
+	    adjusted_mode->crtc_vdisplay > 2000) {
+		DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
+		return;
 	}
 
 	/*
 	 * FIXME:enable psr2 only for y-cordinate psr2 panels
 	 * After gtc implementation , remove this restriction.
 	 */
-	if (!dev_priv->psr.y_cord_support &&  dev_priv->psr.psr2_support) {
+	if (!dev_priv->psr.y_cord_support) {
 		DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
-		return false;
+		return;
 	}
 
-	dev_priv->psr.source_ok = true;
-	return true;
+	crtc_state->has_psr = true;
+	crtc_state->has_psr2 = true;
 }
 
 static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -531,13 +511,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (!HAS_PSR(dev_priv))
-		return;
-
-	if (!is_edp_psr(intel_dp)) {
-		DRM_DEBUG_KMS("PSR not supported by this panel\n");
+	if (!crtc_state->has_psr)
 		return;
-	}
 
 	WARN_ON(dev_priv->drrs.dp);
 	mutex_lock(&dev_priv->psr.lock);
@@ -546,8 +521,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 		goto unlock;
 	}
 
-	if (!intel_psr_match_conditions(intel_dp))
-		goto unlock;
+	dev_priv->psr.psr2_support = crtc_state->has_psr2;
+	dev_priv->psr.source_ok = true;
 
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
@@ -668,7 +643,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (!HAS_PSR(dev_priv))
+	if (!old_crtc_state->has_psr)
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);

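The intel_psr.c rework above moves every "can we do PSR?" check into a compute_config step that records the verdict in the CRTC state; enable and disable then only test that flag. A compact model of the flow, where the field names follow the diff (has_psr, has_psr2, the 3200x2000 PSR2 limit) but the structs are otherwise invented:

#include <stdbool.h>
#include <stdio.h>

struct crtc_state_model {
	bool has_psr;
	bool has_psr2;
	int hdisplay, vdisplay;
};

static void psr_compute_config(struct crtc_state_model *s, bool sink_psr,
			       bool sink_psr2)
{
	if (!sink_psr)
		return;		/* has_psr stays false: PSR stays off */
	s->has_psr = true;
	/* PSR2 is restricted to panel resolutions up to 3200x2000 */
	if (sink_psr2 && s->hdisplay <= 3200 && s->vdisplay <= 2000)
		s->has_psr2 = true;
}

static void psr_enable(const struct crtc_state_model *s)
{
	if (!s->has_psr)
		return;		/* the only check left at enable time */
	printf("enabling %s\n", s->has_psr2 ? "PSR2" : "PSR1");
}

int main(void)
{
	struct crtc_state_model s = { .hdisplay = 1920, .vdisplay = 1080 };

	psr_compute_config(&s, true, true);
	psr_enable(&s);
	return 0;
}
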
+ 10 - 6
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -484,11 +484,6 @@ static bool stop_ring(struct intel_engine_cs *engine)
 	I915_WRITE_HEAD(engine, 0);
 	I915_WRITE_TAIL(engine, 0);
 
-	if (INTEL_GEN(dev_priv) > 2) {
-		(void)I915_READ_CTL(engine);
-		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
-	}
-
 	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
@@ -570,6 +565,9 @@ static int init_ring_common(struct intel_engine_cs *engine)
 
 	intel_engine_init_hangcheck(engine);
 
+	if (INTEL_GEN(dev_priv) > 2)
+		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+
 out:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -1246,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
 	if (IS_ERR(addr))
 		goto err;
 
+	vma->obj->pin_global++;
+
 	ring->vaddr = addr;
 	return 0;
 
@@ -1277,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 		i915_gem_object_unpin_map(ring->vma->obj);
 	ring->vaddr = NULL;
 
+	ring->vma->obj->pin_global--;
 	i915_vma_unpin(ring->vma);
 }
 
@@ -1441,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 			goto err;
 
 		ce->state->obj->mm.dirty = true;
+		ce->state->obj->pin_global++;
 	}
 
 	/* The kernel context is only used as a placeholder for flushing the
@@ -1475,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
 	if (--ce->pin_count)
 		return;
 
-	if (ce->state)
+	if (ce->state) {
+		ce->state->obj->pin_global--;
 		i915_vma_unpin(ce->state);
+	}
 
 	i915_gem_context_put(ctx);
 }

+ 2 - 2
drivers/gpu/drm/i915/intel_sprite.c

@@ -230,7 +230,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 #endif
 }
 
-static void
+void
 skl_update_plane(struct intel_plane *plane,
 		 const struct intel_crtc_state *crtc_state,
 		 const struct intel_plane_state *plane_state)
@@ -311,7 +311,7 @@ skl_update_plane(struct intel_plane *plane,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-static void
+void
 skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

+ 14 - 6
drivers/gpu/drm/i915/intel_uc.c

@@ -68,7 +68,7 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
 		if (HAS_HUC_UCODE(dev_priv))
 			intel_huc_select_fw(&dev_priv->huc);
 
-		if (intel_guc_select_fw(&dev_priv->guc))
+		if (intel_guc_fw_select(&dev_priv->guc))
 			i915_modparams.enable_guc_loading = 0;
 	}
 
@@ -195,7 +195,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 			goto err_submission;
 
 		intel_huc_init_hw(&dev_priv->huc);
-		ret = intel_guc_init_hw(&dev_priv->guc);
+		intel_guc_init_params(guc);
+		ret = intel_guc_fw_upload(guc);
 		if (ret == 0 || ret != -EAGAIN)
 			break;
 
@@ -221,6 +222,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 			goto err_interrupts;
 	}
 
+	dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
+		 i915_modparams.enable_guc_submission ? "submission enabled" :
+							"loaded",
+		 guc->fw.path,
+		 guc->fw.major_ver_found, guc->fw.minor_ver_found);
+
 	return 0;
 
 	/*
@@ -243,12 +250,14 @@ err_submission:
 err_guc:
 	i915_ggtt_disable_guc(dev_priv);
 
-	DRM_ERROR("GuC init failed\n");
 	if (i915_modparams.enable_guc_loading > 1 ||
-	    i915_modparams.enable_guc_submission > 1)
+	    i915_modparams.enable_guc_submission > 1) {
+		DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
 		ret = -EIO;
-	else
+	} else {
+		DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
 		ret = 0;
+	}
 
 	if (i915_modparams.enable_guc_submission) {
 		i915_modparams.enable_guc_submission = 0;
@@ -256,7 +265,6 @@ err_guc:
 	}
 
 	i915_modparams.enable_guc_loading = 0;
-	DRM_NOTE("GuC firmware loading disabled\n");
 
 	return ret;
 }

+ 162 - 37
drivers/gpu/drm/i915/intel_uc_fw.c

@@ -23,6 +23,7 @@
  */
 
 #include <linux/firmware.h>
+#include <drm/drm_print.h>
 
 #include "intel_uc_fw.h"
 #include "i915_drv.h"
@@ -45,26 +46,33 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	size_t size;
 	int err;
 
+	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
 	if (!uc_fw->path)
 		return;
 
 	uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
-	DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
 			 intel_uc_fw_status_repr(uc_fw->fetch_status));
 
 	err = request_firmware(&fw, uc_fw->path, &pdev->dev);
-	if (err)
-		goto fail;
-	if (!fw)
+	if (err) {
+		DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
 		goto fail;
 		goto fail;
+	}
 
 
-	DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
-			 uc_fw->path, fw);
+	DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
+			 intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
 
 
 	/* Check the size of the blob before examining buffer contents */
 	/* Check the size of the blob before examining buffer contents */
 	if (fw->size < sizeof(struct uc_css_header)) {
 	if (fw->size < sizeof(struct uc_css_header)) {
-		DRM_NOTE("Firmware header is missing\n");
+		DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 fw->size, sizeof(struct uc_css_header));
+		err = -ENODATA;
 		goto fail;
 		goto fail;
 	}
 	}
 
 
@@ -77,7 +85,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 			     sizeof(u32);
 
 	if (uc_fw->header_size != sizeof(struct uc_css_header)) {
-		DRM_NOTE("CSS header definition mismatch\n");
+		DRM_WARN("%s: Mismatched firmware header definition\n",
+			 intel_uc_fw_type_repr(uc_fw->type));
+		err = -ENOEXEC;
 		goto fail;
 	}
 
@@ -85,9 +95,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
 
+	/* Header and uCode will be loaded to WOPCM */
+	size = uc_fw->header_size + uc_fw->ucode_size;
+	if (size > intel_guc_wopcm_size(dev_priv)) {
+		DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
+			 intel_uc_fw_type_repr(uc_fw->type));
+		err = -E2BIG;
+		goto fail;
+	}
+
 	/* now RSA */
 	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
-		DRM_NOTE("RSA key size is bad\n");
+		DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
+			 intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
+		err = -ENOEXEC;
 		goto fail;
 	}
 	uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
@@ -96,7 +117,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	/* At least, it should have header, uCode and RSA. Size of all three. */
 	size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
 	if (fw->size < size) {
-		DRM_NOTE("Missing firmware components\n");
+		DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
+			 intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
+		err = -ENOEXEC;
 		goto fail;
 	}
 
@@ -108,14 +131,6 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	 */
 	switch (uc_fw->type) {
 	case INTEL_UC_FW_TYPE_GUC:
-		/* Header and uCode will be loaded to WOPCM. Size of the two. */
-		size = uc_fw->header_size + uc_fw->ucode_size;
-
-		/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
-		if (size > intel_guc_wopcm_size(dev_priv)) {
-			DRM_ERROR("Firmware is too large to fit in WOPCM\n");
-			goto fail;
-		}
 		uc_fw->major_ver_found = css->guc.sw_version >> 16;
 		uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
 		break;
@@ -126,17 +141,21 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 		break;
 
 	default:
-		DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
-		err = -ENOEXEC;
-		goto fail;
+		MISSING_CASE(uc_fw->type);
+		break;
 	}
 
+	DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
+			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
 	if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
-		DRM_NOTE("Skipping %s firmware version check\n",
+		DRM_NOTE("%s: Skipping firmware version check\n",
 			 intel_uc_fw_type_repr(uc_fw->type));
 	} else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
 		   uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
-		DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
+		DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
 			 intel_uc_fw_type_repr(uc_fw->type),
 			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
 			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
@@ -144,34 +163,115 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 		goto fail;
 	}
 
-	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
-			 uc_fw->major_ver_found, uc_fw->minor_ver_found,
-			 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
 	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
+		DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
 		goto fail;
 	}
 
 	uc_fw->obj = obj;
 	uc_fw->size = fw->size;
-
-	DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
-			 uc_fw->obj);
+	uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 intel_uc_fw_status_repr(uc_fw->fetch_status));
 
 	release_firmware(fw);
-	uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
 	return;
 
 fail:
-	DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
-		 uc_fw->path, err);
-	DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
-			 err, fw, uc_fw->obj);
+	uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+	DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+	DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
+		 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+	DRM_INFO("%s: Firmware can be downloaded from %s\n",
+		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
 
 	release_firmware(fw);		/* OK even if fw is NULL */
-	uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ *
+ * @uc_fw: uC firmware
+ * @xfer: custom uC firmware loader function
+ *
+ * Loads uC firmware using custom loader and updates internal flags.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
+		       int (*xfer)(struct intel_uc_fw *uc_fw,
+				   struct i915_vma *vma))
+{
+	struct i915_vma *vma;
+	int err;
+
+	DRM_DEBUG_DRIVER("%s fw load %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+	if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+		return -EIO;
+
+	uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+	DRM_DEBUG_DRIVER("%s fw load %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 intel_uc_fw_status_repr(uc_fw->load_status));
+
+	/* Pin object with firmware */
+	err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
+	if (err) {
+		DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
+		goto fail;
+	}
+
+	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
+				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
+		goto fail;
+	}
+
+	/* Call custom loader */
+	err = xfer(uc_fw, vma);
+
+	/*
+	 * We keep the object pages for reuse during resume. But we can unpin it
+	 * now that DMA has completed, so it doesn't continue to take up space.
+	 */
+	i915_vma_unpin(vma);
+
+	if (err)
+		goto fail;
+
+	uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+	DRM_DEBUG_DRIVER("%s fw load %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 intel_uc_fw_status_repr(uc_fw->load_status));
+
+	DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
+		 intel_uc_fw_type_repr(uc_fw->type),
+		 uc_fw->path,
+		 uc_fw->major_ver_found, uc_fw->minor_ver_found);
+
+	return 0;
+
+fail:
+	uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+	DRM_DEBUG_DRIVER("%s fw load %s\n",
+			 intel_uc_fw_type_repr(uc_fw->type),
+			 intel_uc_fw_status_repr(uc_fw->load_status));
+
+	DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
+		 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+
+	return err;
 }
 
 /**
@@ -191,3 +291,28 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
 
 	uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
 }
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+	drm_printf(p, "%s firmware: %s\n",
+		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+	drm_printf(p, "\tstatus: fetch %s, load %s\n",
+		   intel_uc_fw_status_repr(uc_fw->fetch_status),
+		   intel_uc_fw_status_repr(uc_fw->load_status));
+	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
+		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
+		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
+	drm_printf(p, "\theader: offset %u, size %u\n",
+		   uc_fw->header_offset, uc_fw->header_size);
+	drm_printf(p, "\tuCode: offset %u, size %u\n",
+		   uc_fw->ucode_offset, uc_fw->ucode_size);
+	drm_printf(p, "\tRSA: offset %u, size %u\n",
+		   uc_fw->rsa_offset, uc_fw->rsa_size);
+}

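Editor's note: intel_uc_fw_upload() now owns the generic steps (status tracking, set-to-GTT-domain, GGTT pin above the WOPCM region, unpin) and delegates the hardware DMA to the caller through the xfer callback. A hedged caller sketch; guc_fw_xfer() below is hypothetical, only the callback signature comes from this file:

	/* Hypothetical GuC-specific transfer routine. */
	static int guc_fw_xfer(struct intel_uc_fw *uc_fw, struct i915_vma *vma)
	{
		/* Program the DMA engine to copy @vma into WOPCM and wait. */
		return 0;	/* 0 on success, negative errno on failure */
	}

	/* The generic loader handles pinning and status; we supply the DMA. */
	ret = intel_uc_fw_upload(&guc->fw, guc_fw_xfer);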
+ 14 - 0
drivers/gpu/drm/i915/intel_uc_fw.h

@@ -25,7 +25,12 @@
 #ifndef _INTEL_UC_FW_H_
 #define _INTEL_UC_FW_H_
 
+struct drm_printer;
 struct drm_i915_private;
+struct i915_vma;
+
+/* Home of GuC, HuC and DMC firmwares */
+#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
 
 enum intel_uc_fw_status {
 	INTEL_UC_FIRMWARE_FAIL = -1,
@@ -50,6 +55,11 @@ struct intel_uc_fw {
 	enum intel_uc_fw_status fetch_status;
 	enum intel_uc_fw_status load_status;
 
+	/*
+	 * The firmware build process generates a version header file with the
+	 * major and minor version numbers, which are also built into the CSS
+	 * header of the firmware. The i915 driver sets the minimum firmware
+	 * version required per platform.
+	 */
 	u16 major_ver_wanted;
 	u16 minor_ver_wanted;
 	u16 major_ver_found;
@@ -102,6 +112,10 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
 
 void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 		       struct intel_uc_fw *uc_fw);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
+		       int (*xfer)(struct intel_uc_fw *uc_fw,
+				   struct i915_vma *vma));
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);
 
 #endif

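Editor's note: the wanted/found pairs compared above are unpacked from the 32-bit sw_version field of the CSS header, major version in the high 16 bits and minor in the low 16 bits, exactly as intel_uc_fw_fetch() does. A worked example of the arithmetic (the value is made up):

	u32 sw_version = 0x0009000e;		/* example value only */
	u16 major = sw_version >> 16;		/* 0x0009 -> 9 */
	u16 minor = sw_version & 0xFFFF;	/* 0x000e -> 14, i.e. version "9.14" */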
+ 7 - 6
drivers/gpu/drm/i915/intel_uncore.c

@@ -1403,6 +1403,9 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv,
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	if (INTEL_GEN(dev_priv) < 3)
+		return;
+
 	for_each_engine_masked(engine, dev_priv, engine_mask, id)
 		gen3_stop_engine(engine);
 }
@@ -1742,16 +1745,12 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
 
 int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	reset_func reset;
+	reset_func reset = intel_get_gpu_reset(dev_priv);
 	int retry;
 	int ret;
 
 	might_sleep();
 
-	reset = intel_get_gpu_reset(dev_priv);
-	if (reset == NULL)
-		return -ENODEV;
-
 	/* If the power well sleeps during the reset, the reset
 	 * request may be dropped and never completes (causing -EIO).
 	 */
@@ -1771,7 +1770,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 		 */
 		i915_stop_engines(dev_priv, engine_mask);
 
-		ret = reset(dev_priv, engine_mask);
+		ret = -ENODEV;
+		if (reset)
+			ret = reset(dev_priv, engine_mask);
 		if (ret != -ETIMEDOUT)
 			break;
 

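Editor's note: hoisting intel_get_gpu_reset() out of the early-return path means platforms with no reset function still run i915_stop_engines() before reporting -ENODEV. The resulting control flow, paraphrased (the retry bound is not visible in these hunks and is illustrative):

	/* Paraphrased shape of the reworked intel_gpu_reset() loop. */
	for (retry = 0; retry < RESET_RETRIES; retry++) {
		i915_stop_engines(dev_priv, engine_mask);	/* always attempted */

		ret = -ENODEV;			/* no reset vfunc on this platform */
		if (reset)
			ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;			/* success or hard failure: stop */
	}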
+ 8 - 0
drivers/gpu/drm/i915/intel_vbt_defs.h

@@ -306,6 +306,14 @@ struct bdb_general_features {
 
 #define LEGACY_CHILD_DEVICE_CONFIG_SIZE		33
 
+/* DDC Bus DDI Type 155+ */
+enum vbt_gmbus_ddi {
+	DDC_BUS_DDI_B = 0x1,
+	DDC_BUS_DDI_C,
+	DDC_BUS_DDI_D,
+	DDC_BUS_DDI_F,
+};
+
 /*
  * The child device config, aka the display device data structure, provides a
  * description of a port and its configuration on the platform.

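Editor's note: the enum names the raw DDC bus values carried by VBT child devices from version 155 onwards; the values run 0x1-0x4 with DDC_BUS_DDI_F directly after DDI_D, i.e. there is no slot for an E entry. A hypothetical sketch of how a VBT parser might consume it (only the enum itself comes from the diff):

	/* Hypothetical helper; the mapping targets are illustrative. */
	static char vbt_ddc_to_ddi_name(enum vbt_gmbus_ddi ddc)
	{
		switch (ddc) {
		case DDC_BUS_DDI_B: return 'B';
		case DDC_BUS_DDI_C: return 'C';
		case DDC_BUS_DDI_D: return 'D';
		case DDC_BUS_DDI_F: return 'F';
		default:            return '?';	/* unknown/reserved value */
		}
	}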
+ 2 - 2
drivers/gpu/drm/i915/selftests/huge_pages.c

@@ -609,7 +609,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 	bool single = false;
 	LIST_HEAD(objects);
 	IGT_TIMEOUT(end_time);
-	int err;
+	int err = -ENODEV;
 
 	for_each_prime_number_from(page_num, 1, max_pages) {
 		struct drm_i915_gem_object *obj;
@@ -1157,7 +1157,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 	unsigned int size_mask;
 	unsigned int page_mask;
 	int n, i;
-	int err;
+	int err = -ENODEV;
 
 	/*
 	 * Sanity check creating objects with a varying mix of page sizes --

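Editor's note: both hunks fix the same latent bug: if the loop body never executes (or exits before assigning err), the function would return an uninitialised value, so err is pre-seeded with -ENODEV to make the zero-iteration case well defined. The general pattern:

	static int run_one(int i) { return 0; /* stand-in for real work */ }

	static int run_all(int n)
	{
		int err = -ENODEV;	/* well defined even if the loop never runs */
		int i;

		for (i = 0; i < n; i++) {
			err = run_one(i);
			if (err)
				break;
		}
		return err;
	}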
+ 1 - 1
drivers/gpu/drm/i915/selftests/i915_gem_context.c

@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
 	if (err)
 		return err;
 
-	list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
 		struct i915_vma *vma;
 
 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

+ 157 - 5
drivers/gpu/drm/i915/selftests/i915_gem_evict.c

@@ -24,6 +24,9 @@
 
 #include "../i915_selftest.h"
 
+#include "lib_sw_fence.h"
+#include "mock_context.h"
+#include "mock_drm.h"
 #include "mock_gem_device.h"
 
 static int populate_ggtt(struct drm_i915_private *i915)
@@ -47,7 +50,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
 
 	if (!list_empty(&i915->mm.unbound_list)) {
 		size = 0;
-		list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+		list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
 			size++;
 
 		pr_err("Found %lld objects unbound!\n", size);
@@ -74,10 +77,10 @@ static void cleanup_objects(struct drm_i915_private *i915)
 {
 	struct drm_i915_gem_object *obj, *on;
 
-	list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+	list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
 		i915_gem_object_put(obj);
 
-	list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+	list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
 		i915_gem_object_put(obj);
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -149,8 +152,6 @@ static int igt_overcommit(void *arg)
 		goto cleanup;
 	}
 
-	list_move(&obj->global_link, &i915->mm.unbound_list);
-
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
 		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -325,6 +326,148 @@ cleanup:
 	return err;
 }
 
+static int igt_evict_contexts(void *arg)
+{
+	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct reserved {
+		struct drm_mm_node node;
+		struct reserved *next;
+	} *reserved = NULL;
+	struct drm_mm_node hole;
+	unsigned long count;
+	int err;
+
+	/*
+	 * The purpose of this test is to verify that we will trigger an
+	 * eviction in the GGTT when constructing a request that requires
+	 * additional space in the GGTT for pinning the context. This space
+	 * is not directly tied to the request so reclaiming it requires
+	 * extra work.
+	 *
+	 * As such this test is only meaningful for full-ppgtt environments
+	 * where the GTT space of the request is separate from the GGTT
+	 * allocation required to build the request.
+	 */
+	if (!USES_FULL_PPGTT(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	/* Reserve a block so that we know we have enough to fit a few rq */
+	memset(&hole, 0, sizeof(hole));
+	err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
+				  0, i915->ggtt.base.total,
+				  PIN_NOEVICT);
+	if (err)
+		goto out_locked;
+
+	/* Make the GGTT appear small by filling it with unevictable nodes */
+	count = 0;
+	do {
+		struct reserved *r;
+
+		r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+		if (!r) {
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
+					0, i915->ggtt.base.total,
+					PIN_NOEVICT)) {
+			kfree(r);
+			break;
+		}
+
+		r->next = reserved;
+		reserved = r;
+
+		count++;
+	} while (1);
+	drm_mm_remove_node(&hole);
+	mutex_unlock(&i915->drm.struct_mutex);
+	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
+
+	/* Overfill the GGTT with context objects and so try to evict one. */
+	for_each_engine(engine, i915, id) {
+		struct i915_sw_fence fence;
+		struct drm_file *file;
+
+		file = mock_file(i915);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+
+		count = 0;
+		mutex_lock(&i915->drm.struct_mutex);
+		onstack_fence_init(&fence);
+		do {
+			struct drm_i915_gem_request *rq;
+			struct i915_gem_context *ctx;
+
+			ctx = live_context(i915, file);
+			if (!ctx)
+				break;
+
+			/* We will need some GGTT space for the rq's context */
+			igt_evict_ctl.fail_if_busy = true;
+			rq = i915_gem_request_alloc(engine, ctx);
+			igt_evict_ctl.fail_if_busy = false;
+
+			if (IS_ERR(rq)) {
+				/* When full, fail_if_busy will trigger EBUSY */
+				if (PTR_ERR(rq) != -EBUSY) {
+					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
+					       ctx->hw_id, engine->name,
+					       (int)PTR_ERR(rq));
+					err = PTR_ERR(rq);
+				}
+				break;
+			}
+
+			/* Keep every request/ctx pinned until we are full */
+			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+							       &fence,
+							       GFP_KERNEL);
+			if (err < 0)
+				break;
+
+			i915_add_request(rq);
+			count++;
+			err = 0;
+		} while (1);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		onstack_fence_fini(&fence);
+		pr_info("Submitted %lu contexts/requests on %s\n",
+			count, engine->name);
+
+		mock_file_free(i915, file);
+		if (err)
+			break;
+	}
+
+	mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+	while (reserved) {
+		struct reserved *next = reserved->next;
+
+		drm_mm_remove_node(&reserved->node);
+		kfree(reserved);
+
+		reserved = next;
+	}
+	if (drm_mm_node_allocated(&hole))
+		drm_mm_remove_node(&hole);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}
+
 int i915_gem_evict_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -348,3 +491,12 @@ int i915_gem_evict_mock_selftests(void)
 	drm_dev_unref(&i915->drm);
 	return err;
 }
+
+int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_evict_contexts),
+	};
+
+	return i915_subtests(tests, i915);
+}

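Editor's note: the test can only overfill the GGTT because every request it submits is gated on a single on-stack fence; nothing retires (and no context image unpins) until onstack_fence_fini() commits the fence at the end of the engine loop. The gating idiom in isolation, a simplified sketch of the loop body above:

	/* Submit @rq but keep it blocked until @gate is committed. */
	static int submit_gated(struct drm_i915_gem_request *rq,
				struct i915_sw_fence *gate)
	{
		int err;

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, gate,
						       GFP_KERNEL);
		if (err < 0)
			return err;

		i915_add_request(rq);
		return 0;
	}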
+ 1 - 0
drivers/gpu/drm/i915/selftests/i915_live_selftests.h

@@ -15,6 +15,7 @@ selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
 selftest(coherency, i915_gem_coherency_live_selftests)
 selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(contexts, i915_gem_context_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)

+ 42 - 0
drivers/gpu/drm/i915/selftests/i915_sw_fence.c

@@ -24,6 +24,7 @@
 
 #include <linux/completion.h>
 #include <linux/delay.h>
+#include <linux/prime_numbers.h>
 
 #include "../i915_selftest.h"
 
@@ -565,6 +566,46 @@ err_in:
 	return ret;
 }
 
+static int test_timer(void *arg)
+{
+	unsigned long target, delay;
+	struct timed_fence tf;
+
+	timed_fence_init(&tf, target = jiffies);
+	if (!i915_sw_fence_done(&tf.fence)) {
+		pr_err("Fence with immediate expiration not signaled\n");
+		goto err;
+	}
+	timed_fence_fini(&tf);
+
+	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+		timed_fence_init(&tf, target = jiffies + delay);
+		if (i915_sw_fence_done(&tf.fence)) {
+			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
+			goto err;
+		}
+
+		i915_sw_fence_wait(&tf.fence);
+		if (!i915_sw_fence_done(&tf.fence)) {
+			pr_err("Fence not signaled after wait\n");
+			goto err;
+		}
+		if (time_before(jiffies, target)) {
+			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
+			       target, jiffies);
+			goto err;
+		}
+
+		timed_fence_fini(&tf);
+	}
+
+	return 0;
+
+err:
+	timed_fence_fini(&tf);
+	return -EINVAL;
+}
+
 int i915_sw_fence_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void)
 		SUBTEST(test_C_AB),
 		SUBTEST(test_chain),
 		SUBTEST(test_ipc),
+		SUBTEST(test_timer),
 	};
 
 	return i915_subtests(tests, NULL);

+ 78 - 0
drivers/gpu/drm/i915/selftests/lib_sw_fence.c

@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "lib_sw_fence.h"
+
+/* Small library of different fence types useful for writing tests */
+
+static int __i915_sw_fence_call
+nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	return NOTIFY_DONE;
+}
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+			  const char *name,
+			  struct lock_class_key *key)
+{
+	debug_fence_init_onstack(fence);
+
+	__init_waitqueue_head(&fence->wait, name, key);
+	atomic_set(&fence->pending, 1);
+	fence->flags = (unsigned long)nop_fence_notify;
+}
+
+void onstack_fence_fini(struct i915_sw_fence *fence)
+{
+	i915_sw_fence_commit(fence);
+	i915_sw_fence_fini(fence);
+}
+
+static void timed_fence_wake(unsigned long data)
+{
+	struct timed_fence *tf = (struct timed_fence *)data;
+
+	i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires)
+{
+	onstack_fence_init(&tf->fence);
+
+	setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+
+	if (time_after(expires, jiffies))
+		mod_timer(&tf->timer, expires);
+	else
+		i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_fini(struct timed_fence *tf)
+{
+	if (del_timer_sync(&tf->timer))
+		i915_sw_fence_commit(&tf->fence);
+
+	destroy_timer_on_stack(&tf->timer);
+	i915_sw_fence_fini(&tf->fence);
+}

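Editor's note: timed_fence pairs an on-stack fence with a timer whose callback commits the fence, so waiters are released once the deadline passes; timed_fence_fini() commits early if the timer had not yet fired. A minimal usage sketch:

	static void wait_about_one_second(void)
	{
		struct timed_fence tf;

		timed_fence_init(&tf, jiffies + HZ);	/* commit ~1s from now */
		i915_sw_fence_wait(&tf.fence);		/* block until committed */
		timed_fence_fini(&tf);			/* stop timer, tear down */
	}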
+ 42 - 0
drivers/gpu/drm/i915/selftests/lib_sw_fence.h

@@ -0,0 +1,42 @@
+/*
+ * lib_sw_fence.h - library routines for testing N:M synchronisation points
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _LIB_SW_FENCE_H_
+#define _LIB_SW_FENCE_H_
+
+#include <linux/timer.h>
+
+#include "../i915_sw_fence.h"
+
+#ifdef CONFIG_LOCKDEP
+#define onstack_fence_init(fence)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__onstack_fence_init((fence), #fence, &__key);	\
+} while (0)
+#else
+#define onstack_fence_init(fence)				\
+	__onstack_fence_init((fence), NULL, NULL)
+#endif
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+			  const char *name,
+			  struct lock_class_key *key);
+void onstack_fence_fini(struct i915_sw_fence *fence);
+
+struct timed_fence {
+	struct i915_sw_fence fence;
+	struct timer_list timer;
+};
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires);
+void timed_fence_fini(struct timed_fence *tf);
+
+#endif /* _LIB_SW_FENCE_H_ */

+ 1 - 5
drivers/gpu/drm/i915/selftests/mock_context.c

@@ -73,11 +73,7 @@ err_put:
 
 void mock_context_close(struct i915_gem_context *ctx)
 {
-	i915_gem_context_set_closed(ctx);
-
-	i915_ppgtt_close(&ctx->ppgtt->base);
-
-	i915_gem_context_put(ctx);
+	context_close(ctx);
 }
 
 void mock_init_contexts(struct drm_i915_private *i915)

+ 3 - 5
drivers/gpu/drm/i915/selftests/mock_engine.c

@@ -32,9 +32,9 @@ static struct mock_request *first_request(struct mock_engine *engine)
 					link);
 }
 
-static void hw_delay_complete(unsigned long data)
+static void hw_delay_complete(struct timer_list *t)
 {
-	struct mock_engine *engine = (typeof(engine))data;
+	struct mock_engine *engine = from_timer(engine, t, hw_delay);
 	struct mock_request *request;
 
 	spin_lock(&engine->hw_lock);
@@ -161,9 +161,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 
 	/* fake hw queue */
 	spin_lock_init(&engine->hw_lock);
-	setup_timer(&engine->hw_delay,
-		    hw_delay_complete,
-		    (unsigned long)engine);
+	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
 	INIT_LIST_HEAD(&engine->hw_queue);
 
 	return &engine->base;
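Editor's note: this is the mechanical conversion to the new kernel timer API: the callback now receives the struct timer_list pointer and recovers its container with from_timer() (container_of() underneath), replacing the cast of an unsigned long cookie. The before/after shape, reduced to essentials:

	/* Old style: opaque cookie, cast back by hand. */
	static void old_cb(unsigned long data)
	{
		struct mock_engine *engine = (struct mock_engine *)data;
		/* ... act on engine ... */
	}
	/* paired with: setup_timer(&engine->hw_delay, old_cb, (unsigned long)engine); */

	/* New style: type-safe container lookup from the timer itself. */
	static void new_cb(struct timer_list *t)
	{
		struct mock_engine *engine = from_timer(engine, t, hw_delay);
		/* ... act on engine ... */
	}
	/* paired with: timer_setup(&engine->hw_delay, new_cb, 0); */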