
Merge tag 'drm-intel-next-fixes-2018-03-22' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Fix for a GVT regression that caused guest VM GPU hangs.
Fix for race conditions in declaring the GPU wedged (hit in CI).

* tag 'drm-intel-next-fixes-2018-03-22' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915/gvt: force to set all context control bits from guest
  drm/i915/gvt: Update PDPs after a vGPU mm object is pinned.
  drm/i915/gvt: Invalidate vGPU PPGTT mm objects during a vGPU reset.
  drm/i915/kvmgt: Handle kzalloc failure
  drm/i915/gvt: fix spelling mistake: "destoried" -> "destroyed"
  drm/i915/gvt: Remove reduntant printing of untracked mmio
  drm/i915/pmu: Work around compiler warnings on some kernel configs
  drm/i915: Only call tasklet_kill() on the first prepare_reset
  drm/i915: Wrap engine->schedule in RCU locks for set-wedge protection
  drm/i915/icl: do not save DDI A/E sharing bit for ICL
Dave Airlie · 7 years ago · commit b4eec0fa53

+ 23 - 1
drivers/gpu/drm/i915/gvt/gtt.c

@@ -2046,7 +2046,7 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 	}
 
 	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
-		gvt_err("vgpu ppgtt mm is not fully destoried\n");
+		gvt_err("vgpu ppgtt mm is not fully destroyed\n");
 
 	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
 		gvt_err("Why we still has spt not freed?\n");
@@ -2290,6 +2290,28 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 		clean_spt_oos(gvt);
 }
 
+/**
+ * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
+ * @vgpu: a vGPU
+ *
+ * This function is called to invalidate all PPGTT instances of a vGPU.
+ *
+ */
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_mm *mm;
+
+	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+		if (mm->type == INTEL_GVT_MM_PPGTT) {
+			list_del_init(&mm->ppgtt_mm.lru_list);
+			if (mm->ppgtt_mm.shadowed)
+				invalidate_ppgtt_mm(mm);
+		}
+	}
+}
+
 /**
  * intel_vgpu_reset_ggtt - reset the GGTT entry
  * @vgpu: a vGPU
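
Note: the new intel_vgpu_invalidate_ppgtt() above walks the PPGTT list with list_for_each_safe() so entries can be unlinked mid-iteration. A minimal standalone sketch of that list_for_each_safe()/container_of() idiom, using a hypothetical struct item rather than the GVT types:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int id;
	struct list_head link;
};

static LIST_HEAD(demo_list);

static void drop_all_items(void)
{
	struct list_head *pos, *n;
	struct item *it;

	/* the _safe variant caches the next pointer, so deleting the
	 * current node while walking the list is legal */
	list_for_each_safe(pos, n, &demo_list) {
		it = container_of(pos, struct item, link);
		list_del_init(&it->link);
		kfree(it);
	}
}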

+ 1 - 0
drivers/gpu/drm/i915/gvt/gtt.h

@@ -194,6 +194,7 @@ struct intel_vgpu_gtt {
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);

+ 9 - 0
drivers/gpu/drm/i915/gvt/handlers.c

@@ -1767,6 +1767,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(CURBASE(PIPE_B), D_ALL);
 	MMIO_D(CURBASE(PIPE_C), D_ALL);
 
+	MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
+	MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
+	MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
+
 	MMIO_D(_MMIO(0x700ac), D_ALL);
 	MMIO_D(_MMIO(0x710ac), D_ALL);
 	MMIO_D(_MMIO(0x720ac), D_ALL);
@@ -2228,6 +2232,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
 	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+	MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
 
 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
 	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
@@ -2559,6 +2564,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(WM_MISC, D_BDW);
 	MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
 
+	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
 
@@ -2787,6 +2793,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
@@ -2801,7 +2808,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 
+	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
 	MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
 	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
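Note: registering these offsets with MMIO_D()/MMIO_F() puts them in GVT's tracked-MMIO table, so guest accesses get a handler instead of falling through to the repeated "untracked mmio" messages targeted by the "Remove reduntant printing of untracked mmio" patch in the shortlog. A hypothetical sketch of the table-driven dispatch idea (not GVT's actual implementation, which uses a hash table):

#include <linux/printk.h>
#include <linux/types.h>

struct demo_mmio_info {
	u32 offset;
	int (*handler)(void *vgpu, u32 offset, void *p_data, unsigned int bytes);
};

static int demo_mmio_dispatch(const struct demo_mmio_info *table, size_t n,
			      void *vgpu, u32 offset, void *p_data,
			      unsigned int bytes)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].offset == offset)
			return table[i].handler(vgpu, offset, p_data, bytes);

	/* unregistered offsets end up on the noisy warning path */
	pr_debug("untracked mmio 0x%x\n", offset);
	return 0;
}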

+ 15 - 7
drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -184,7 +184,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 	return NULL;
 }
 
-static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 		dma_addr_t dma_addr)
 {
 	struct gvt_dma *new, *itr;
@@ -192,7 +192,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 
 	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
 	if (!new)
-		return;
+		return -ENOMEM;
 
 	new->vgpu = vgpu;
 	new->gfn = gfn;
@@ -229,6 +229,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
 
 	vgpu->vdev.nr_cache_entries++;
+	return 0;
 }
 
 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
@@ -1586,11 +1587,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
 	if (!entry) {
 		ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
-		if (ret) {
-			mutex_unlock(&info->vgpu->vdev.cache_lock);
-			return ret;
-		}
-		__gvt_cache_add(info->vgpu, gfn, *dma_addr);
+		if (ret)
+			goto err_unlock;
+
+		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+		if (ret)
+			goto err_unmap;
 	} else {
 		kref_get(&entry->ref);
 		*dma_addr = entry->dma_addr;
@@ -1598,6 +1600,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
 	mutex_unlock(&info->vgpu->vdev.cache_lock);
 	return 0;
+
+err_unmap:
+	gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+err_unlock:
+	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	return ret;
 }
 
 static void __gvt_dma_release(struct kref *ref)
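
Note: the kzalloc-failure fix above makes __gvt_cache_add() return an error and unwinds with the kernel's usual goto-label pattern, where each failure jumps to a label that undoes exactly the steps taken so far. A generic sketch of that pattern under hypothetical helpers (map_bar()/acquire_foo() stand in for gvt_dma_map_page()/__gvt_cache_add()):

#include <linux/mutex.h>

struct demo {
	struct mutex lock;
};

int map_bar(struct demo *d);
void unmap_bar(struct demo *d);
int acquire_foo(struct demo *d);

static int setup_resources(struct demo *d)
{
	int ret;

	mutex_lock(&d->lock);

	ret = map_bar(d);		/* first resource */
	if (ret)
		goto err_unlock;

	ret = acquire_foo(d);		/* second resource */
	if (ret)
		goto err_unmap;		/* undo map_bar() before returning */

	mutex_unlock(&d->lock);
	return 0;

err_unmap:
	unmap_bar(d);
err_unlock:
	mutex_unlock(&d->lock);
	return ret;
}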

+ 33 - 4
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	if (WARN_ON(!workload->shadow_mm))
+		return;
+
+	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+		return;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap(page);
+	set_context_pdp_root_pointer(shadow_ring_context,
+			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+	kunmap(page);
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
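Note: set_context_pdp_root_pointer() is deliberately moved out of populate_shadow_context() and into the new update_shadow_pdps(), which prepare_workload() calls only after the shadow mm object has been pinned; per the "Update PDPs after a vGPU mm object is pinned" patch, the shadow PDPs are only valid at that point, which is what the WARN_ON checks on shadow_mm and its pincount above enforce.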
@@ -101,8 +124,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+#define COPY_REG_MASKED(name) {\
+		intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+					      + RING_CTX_OFF(name.val),\
+					      &shadow_ring_context->name.val, 4);\
+		shadow_ring_context->name.val |= 0xffff << 16;\
+	}
 
-	COPY_REG(ctx_ctrl);
+	COPY_REG_MASKED(ctx_ctrl);
 	COPY_REG(ctx_timestamp);
 
 	if (ring_id == RCS) {
@@ -111,9 +140,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		COPY_REG(rcs_indirect_ctx_offset);
 	}
 #undef COPY_REG
-
-	set_context_pdp_root_pointer(shadow_ring_context,
-				     (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+#undef COPY_REG_MASKED
 
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
@@ -509,6 +536,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return ret;
 	}
 
+	update_shadow_pdps(workload);
+
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
 		gvt_vgpu_err("fail to vgpu sync oos pages\n");
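
Note: COPY_REG_MASKED() ORs 0xffff into the top half of ctx_ctrl because, on such registers, bits 31:16 act as a per-bit write-enable mask for bits 15:0; forcing the whole mask on makes every guest control bit take effect in the shadow context, which is the point of the "force to set all context control bits from guest" fix. A small sketch of the convention, mirroring i915's _MASKED_BIT_ENABLE() style:

#include <linux/types.h>

/* enable writes to all of bits 15:0 by setting their mask bits 31:16 */
static inline u32 mask_all_bits(u32 val)
{
	return val | (0xffff << 16);
}

/* set a single low bit together with its write-enable mask bit */
static inline u32 masked_bit_enable(u32 bit)
{
	return (bit << 16) | bit;
}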

+ 1 - 0
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -522,6 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
 		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+		intel_vgpu_invalidate_ppgtt(vgpu);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
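Note: this is where the new helper lands in the reset path. On a full GPU or device-model-level reset the vGPU's shadow PPGTT mm objects are invalidated (the "Invalidate vGPU PPGTT mm objects during a vGPU reset" patch), so stale shadow page tables cannot be reused after the guest reboots and reprograms its PDPs.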

+ 16 - 7
drivers/gpu/drm/i915/i915_gem.c

@@ -471,10 +471,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rq = to_request(fence);
 	engine = rq->engine;
-	if (!engine->schedule)
-		return;
 
-	engine->schedule(rq, prio);
+	rcu_read_lock();
+	if (engine->schedule)
+		engine->schedule(rq, prio);
+	rcu_read_unlock();
 }
 
 static void fence_set_priority(struct dma_fence *fence, int prio)
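
Note: the hunk above swaps the unlocked early return for an rcu_read_lock() section: engine->schedule may be set to NULL by i915_gem_set_wedged() at any moment, so the pointer is tested and called entirely inside the read-side critical section. A minimal reader-side sketch of the idiom, with a hypothetical demo_engine standing in for intel_engine_cs:

#include <linux/rcupdate.h>

struct demo_engine {
	void (*schedule)(void *rq, int prio);
};

static void demo_set_priority(struct demo_engine *engine, void *rq, int prio)
{
	rcu_read_lock();
	/* the hook may be cleared concurrently; test it inside the
	 * read-side section and do not re-read it afterwards */
	if (engine->schedule)
		engine->schedule(rq, prio);
	rcu_read_unlock();
}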
@@ -2939,8 +2940,16 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 	 * calling engine->init_hw() and also writing the ELSP.
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
+	 *
+	 * Note that this needs to be a single atomic operation on the
+	 * tasklet (flush existing tasks, prevent new tasks) to prevent
+	 * a race between reset and set-wedged. It is not, so we do the best
+	 * we can atm and make sure we don't lock the machine up in the more
+	 * common case of recursively being called from set-wedged from inside
+	 * i915_reset.
 	 */
-	tasklet_kill(&engine->execlists.tasklet);
+	if (!atomic_read(&engine->execlists.tasklet.count))
+		tasklet_kill(&engine->execlists.tasklet);
 	tasklet_disable(&engine->execlists.tasklet);
 
 	/*
@@ -3214,8 +3223,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 */
 	for_each_engine(engine, i915, id) {
 		i915_gem_reset_prepare_engine(engine);
+
 		engine->submit_request = nop_submit_request;
+		engine->schedule = NULL;
 	}
+	i915->caps.scheduler = 0;
 
 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3233,11 +3245,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * start to complete all requests.
 		 */
 		engine->submit_request = nop_complete_submit_request;
-		engine->schedule = NULL;
 	}
 
-	i915->caps.scheduler = 0;
-
 	/*
 	 * Make sure no request can slip through without getting completed by
 	 * either this call here to intel_engine_init_global_seqno, or the one
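
Note: this is the writer side of the RCU scheme sketched after __fence_set_priority() above. set_wedged clears engine->schedule (and caps.scheduler) first, and the "make sure no one is running the old callback" step then waits out readers before tearing anything else down. A generic sketch, assuming a synchronize_rcu()-style barrier as the comment implies:

#include <linux/rcupdate.h>

struct demo_engine {
	void (*schedule)(void *rq, int prio);
};

static void demo_disable_scheduling(struct demo_engine *engine)
{
	engine->schedule = NULL;	/* readers re-check under rcu_read_lock() */
	synchronize_rcu();		/* wait for in-flight readers to finish */
}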

+ 13 - 19
drivers/gpu/drm/i915/i915_pmu.c

@@ -433,7 +433,7 @@ static u64 __get_rc6(struct drm_i915_private *i915)
 	return val;
 }
 
-static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+static u64 get_rc6(struct drm_i915_private *i915)
 {
 #if IS_ENABLED(CONFIG_PM)
 	unsigned long flags;
@@ -449,8 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		 * previously.
 		 */
 
-		if (!locked)
-			spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock_irqsave(&i915->pmu.lock, flags);
 
 		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
 			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
@@ -459,12 +458,10 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
 		}
 
-		if (!locked)
-			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	} else {
 		struct pci_dev *pdev = i915->drm.pdev;
 		struct device *kdev = &pdev->dev;
-		unsigned long flags2;
 
 		/*
 		 * We are runtime suspended.
@@ -473,10 +470,8 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		 * on top of the last known real value, as the approximated RC6
 		 * counter value.
 		 */
-		if (!locked)
-			spin_lock_irqsave(&i915->pmu.lock, flags);
-
-		spin_lock_irqsave(&kdev->power.lock, flags2);
+		spin_lock_irqsave(&i915->pmu.lock, flags);
+		spin_lock(&kdev->power.lock);
 
 		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
 			i915->pmu.suspended_jiffies_last =
@@ -486,14 +481,13 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 		      i915->pmu.suspended_jiffies_last;
 		val += jiffies - kdev->power.accounting_timestamp;
 
-		spin_unlock_irqrestore(&kdev->power.lock, flags2);
+		spin_unlock(&kdev->power.lock);
 
 		val = jiffies_to_nsecs(val);
 		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
 
-		if (!locked)
-			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 
 	return val;
@@ -502,7 +496,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
 #endif
 }
 
-static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
+static u64 __i915_pmu_event_read(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
@@ -540,7 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 			val = count_interrupts(i915);
 			break;
 		case I915_PMU_RC6_RESIDENCY:
-			val = get_rc6(i915, locked);
+			val = get_rc6(i915);
 			break;
 		}
 	}
@@ -555,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
 
 again:
 	prev = local64_read(&hwc->prev_count);
-	new = __i915_pmu_event_read(event, false);
+	new = __i915_pmu_event_read(event);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
 		goto again;
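
Note: with the counter read moved after spin_unlock_irqrestore() in i915_pmu_enable() (last hunk below), __i915_pmu_event_read() is never called with pmu.lock held, so the `locked` parameter and the conditional locking it guarded can be dropped entirely; conditional spin_lock_irqsave() on a possibly-uninitialized `flags` is the sort of pattern that provoked the compiler warnings on some kernel configs. The local64_cmpxchg() retry loop above then publishes the new sample without needing the lock at all.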
@@ -605,14 +599,14 @@ static void i915_pmu_enable(struct perf_event *event)
 		engine->pmu.enable_count[sample]++;
 	}
 
+	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+
 	/*
 	 * Store the current counter value so we can report the correct delta
 	 * for all listeners. Even when the event was already enabled and has
 	 * an existing non-zero value.
 	 */
-	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
-
-	spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
 }
 
 static void i915_pmu_disable(struct perf_event *event)
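
Note: the get_rc6() change also simplifies the nested locking: once pmu.lock is taken with spin_lock_irqsave(), the inner kdev->power.lock can use plain spin_lock()/spin_unlock(), because interrupts are already disabled and the second flags variable (flags2) is unnecessary. A standalone sketch of that shape, with hypothetical demo locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_outer);
static DEFINE_SPINLOCK(demo_inner);

static void demo_nested_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_outer, flags);	/* disables local IRQs */
	spin_lock(&demo_inner);			/* no second flags needed */

	/* ... read/update counters ... */

	spin_unlock(&demo_inner);
	spin_unlock_irqrestore(&demo_outer, flags);
}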

+ 2 - 0
drivers/gpu/drm/i915/i915_request.c

@@ -1081,8 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
+	rcu_read_lock();
 	if (engine->schedule)
 		engine->schedule(request, request->ctx->priority);
+	rcu_read_unlock();
 
 	local_bh_disable();
 	i915_sw_fence_commit(&request->submit);
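
Note: same reader-side pattern as in __fence_set_priority() above; the engine->schedule test and call sit inside a single rcu_read_lock() section, so a concurrent set-wedge can safely NULL the hook and then wait out readers.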

+ 6 - 3
drivers/gpu/drm/i915/intel_ddi.c

@@ -3080,9 +3080,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
 
-	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
-					  (DDI_BUF_PORT_REVERSAL |
-					   DDI_A_4_LANES);
+	if (INTEL_GEN(dev_priv) >= 11)
+		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+			DDI_BUF_PORT_REVERSAL;
+	else
+		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 	intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
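
Note: per the "do not save DDI A/E sharing bit for ICL" patch in the shortlog, on gen11 (ICL) the DDI A/E lane-sharing arrangement that DDI_A_4_LANES describes does not apply, so only DDI_BUF_PORT_REVERSAL is carried over into saved_port_bits on those parts; pre-gen11 hardware keeps saving both bits as before.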